diff --git a/src/hotspot/share/ci/ciEnv.cpp b/src/hotspot/share/ci/ciEnv.cpp index 854d62b0afb..50e7b464a30 100644 --- a/src/hotspot/share/ci/ciEnv.cpp +++ b/src/hotspot/share/ci/ciEnv.cpp @@ -981,6 +981,214 @@ void ciEnv::validate_compile_task_dependencies(ciMethod* target) { } } +// scc_entry != nullptr implies loading compiled code from AOT code cache +bool ciEnv::is_compilation_valid(JavaThread* thread, ciMethod* target, bool preload, bool install_code, CodeBuffer* code_buffer, SCCEntry* scc_entry) { + methodHandle method(thread, target->get_Method()); + + // We require method counters to store some method state (max compilation levels) required by the compilation policy. + if (!preload && method->get_method_counters(thread) == nullptr) { + record_failure("can't create method counters"); + if (scc_entry == nullptr || !UseNewCode2) { + // All buffers in the CodeBuffer are allocated in the CodeCache. + // If the code buffer is created on each compile attempt + // as in C2, then it must be freed. + // But keep shared code. + code_buffer->free_blob(); + } + return false; + } + + if (scc_entry != nullptr) { + // Invalid compilation states: + // - SCCache is closed, SCC entry is garbage. + // - SCC entry indicates this shared code was marked invalid while it was loaded. + if (!SCCache::is_on() || scc_entry->not_entrant()) { + if (!UseNewCode2) { + code_buffer->free_blob(); + } + return false; + } + } + + // Change in Jvmti state may invalidate compilation. + if (!failing() && jvmti_state_changed()) { + record_failure("Jvmti state change invalidated dependencies"); + } + + // Change in DTrace flags may invalidate compilation. 
+ if (!failing() && + ( (!dtrace_method_probes() && DTraceMethodProbes) || + (!dtrace_alloc_probes() && DTraceAllocProbes) )) { + record_failure("DTrace flags change invalidated dependencies"); + } + + if (!preload && !failing() && target->needs_clinit_barrier() && + target->holder()->is_in_error_state()) { + record_failure("method holder is in error state"); + } + + if (!failing() && (scc_entry == nullptr)) { + if (log() != nullptr) { + // Log the dependencies which this compilation declares. + dependencies()->log_all_dependencies(); + } + + // Encode the dependencies now, so we can check them right away. + dependencies()->encode_content_bytes(); + } + // Check for {class loads, evolution, breakpoints, ...} during compilation + if (!failing() && install_code) { + // Check for {class loads, evolution, breakpoints, ...} during compilation + validate_compile_task_dependencies(target); + if (failing() && preload) { + ResourceMark rm; + char *method_name = method->name_and_sig_as_C_string(); + log_info(scc)("preload code for '%s' failed dependency check", method_name); + } + } + + if (failing()) { + // While not a true deoptimization, it is a preemptive decompile. + MethodData* mdo = method()->method_data(); + if (mdo != nullptr && _inc_decompile_count_on_failure) { + mdo->inc_decompile_count(); + } + + if (scc_entry == nullptr && !UseNewCode2) { + // All buffers in the CodeBuffer are allocated in the CodeCache. + // If the code buffer is created on each compile attempt + // as in C2, then it must be freed. 
+ code_buffer->free_blob(); + } + return false; + } + return true; +} + +void ciEnv::make_code_usable(JavaThread* thread, ciMethod* target, bool preload, int entry_bci, SCCEntry* scc_entry, nmethod* nm) { + methodHandle method(thread, target->get_Method()); + + if (entry_bci == InvocationEntryBci) { + if (TieredCompilation) { + // If there is an old version we're done with it + nmethod* old = method->code(); + if (TraceMethodReplacement && old != nullptr) { + ResourceMark rm; + char *method_name = method->name_and_sig_as_C_string(); + tty->print_cr("Replacing method %s", method_name); + } + if (old != nullptr) { + old->make_not_used(); + } + } + + LogTarget(Info, nmethod, install) lt; + if (lt.is_enabled()) { + ResourceMark rm; + char *method_name = method->name_and_sig_as_C_string(); + lt.print("Installing method (L%d) %s id=%d scc=%s%s%u", + task()->comp_level(), method_name, compile_id(), + task()->is_scc() ? "A" : "", preload ? "P" : "", + (scc_entry != nullptr ? scc_entry->offset() : 0)); + } + // Allow the code to be executed + MutexLocker ml(NMethodState_lock, Mutex::_no_safepoint_check_flag); + if (nm->make_in_use()) { +#ifdef ASSERT + BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); + if (bs_nm != nullptr && bs_nm->supports_entry_barrier(nm)) { + if (!bs_nm->is_armed(nm)) { + log_info(init)("nmethod %d %d not armed", nm->compile_id(), nm->comp_level()); + } + } +#endif // ASSERT + if (preload) { + method->set_preload_code(nm); + } + if (!preload || target->holder()->is_linked()) { + method->set_code(method, nm); + } + } + } else { + LogTarget(Info, nmethod, install) lt; + if (lt.is_enabled()) { + ResourceMark rm; + char *method_name = method->name_and_sig_as_C_string(); + lt.print("Installing osr method (L%d) %s @ %d id=%u scc=%s%u", + task()->comp_level(), method_name, entry_bci, compile_id(), + task()->is_scc() ? "A" : "", + (scc_entry != nullptr ? 
scc_entry->offset() : 0)); + } + MutexLocker ml(NMethodState_lock, Mutex::_no_safepoint_check_flag); + if (nm->make_in_use()) { + method->method_holder()->add_osr_nmethod(nm); + } + } +} + +void ciEnv::register_aot_method(ciMethod* target, + AbstractCompiler* compiler, + int entry_bci, + GrowableArray& oop_list, + GrowableArray& metadata_list, + GrowableArray& reloc_imm_oop_list, + GrowableArray& reloc_imm_metadata_list, + SCCReader* scc_reader, + SCnmethod* scnm) { + SCCEntry* scc_entry = task()->scc_entry(); + assert(scc_entry != nullptr, "must be"); + VM_ENTRY_MARK; + nmethod* nm = nullptr; + { + methodHandle method(THREAD, target->get_Method()); + bool preload = task()->preload(); // Code is preloaded before Java method execution + + // Check if memory should be freed before allocation + CodeCache::gc_on_allocation(); + + // To prevent compile queue updates. + MutexLocker locker(THREAD, MethodCompileQueue_lock); + + // Prevent InstanceKlass::add_to_hierarchy from running + // and invalidating our dependencies until we install this method. + // No safepoints are allowed. Otherwise, class redefinition can occur in between. + MutexLocker ml(Compile_lock); + NoSafepointVerifier nsv; + + if (!is_compilation_valid(THREAD, target, preload, true /*install_code*/, nullptr /*code_buffer*/, scc_entry)) { + return; + } + + nm = nmethod::new_nmethod(method, + compiler, + compile_id(), + CompLevel(task()->comp_level()), + entry_bci, + preload, + oop_list, + metadata_list, + reloc_imm_oop_list, + reloc_imm_metadata_list, + scc_reader, + scnm); + + if (nm != nullptr) { + make_code_usable(THREAD, target, preload, entry_bci, scc_entry, nm); + } + } + + NoSafepointVerifier nsv; + if (nm != nullptr) { + // Compilation succeeded, post what we know about it + nm->post_compiled_method(task()); + task()->set_num_inlined_bytecodes(num_inlined_bytecodes()); + } else { + // The CodeCache is full. 
+ record_failure("code cache is full"); + } + // safepoints are allowed again +} + // ------------------------------------------------------------------ // ciEnv::register_method void ciEnv::register_method(ciMethod* target, @@ -1008,17 +1216,6 @@ void ciEnv::register_method(ciMethod* target, methodHandle method(THREAD, target->get_Method()); bool preload = task()->preload(); // Code is preloaded before Java method execution - // We require method counters to store some method state (max compilation levels) required by the compilation policy. - if (!preload && method->get_method_counters(THREAD) == nullptr) { - record_failure("can't create method counters"); - // All buffers in the CodeBuffer are allocated in the CodeCache. - // If the code buffer is created on each compile attempt - // as in C2, then it must be freed. - // But keep shared code. - code_buffer->free_blob(); - return; - } - // Check if memory should be freed before allocation CodeCache::gc_on_allocation(); @@ -1031,71 +1228,14 @@ void ciEnv::register_method(ciMethod* target, MutexLocker ml(Compile_lock); NoSafepointVerifier nsv; - if (scc_entry != nullptr) { - // Invalid compilation states: - // - SCCache is closed, SCC entry is garbage. - // - SCC entry indicates this shared code was marked invalid while it was loaded. - if (!SCCache::is_on() || scc_entry->not_entrant()) { - code_buffer->free_blob(); - return; - } - } - - // Change in Jvmti state may invalidate compilation. - if (!failing() && jvmti_state_changed()) { - record_failure("Jvmti state change invalidated dependencies"); - } - - // Change in DTrace flags may invalidate compilation. 
- if (!failing() && - ( (!dtrace_method_probes() && DTraceMethodProbes) || - (!dtrace_alloc_probes() && DTraceAllocProbes) )) { - record_failure("DTrace flags change invalidated dependencies"); - } - - if (!preload && !failing() && target->needs_clinit_barrier() && - target->holder()->is_in_error_state()) { - record_failure("method holder is in error state"); - } - - if (!failing() && (scc_entry == nullptr)) { - if (log() != nullptr) { - // Log the dependencies which this compilation declares. - dependencies()->log_all_dependencies(); - } - - // Encode the dependencies now, so we can check them right away. - dependencies()->encode_content_bytes(); - } - // Check for {class loads, evolution, breakpoints, ...} during compilation - if (!failing() && install_code) { - // Check for {class loads, evolution, breakpoints, ...} during compilation - validate_compile_task_dependencies(target); - if (failing() && preload) { - ResourceMark rm; - char *method_name = method->name_and_sig_as_C_string(); - log_info(scc)("preload code for '%s' failed dependency check", method_name); - } - } - - if (failing()) { - // While not a true deoptimization, it is a preemptive decompile. - MethodData* mdo = method()->method_data(); - if (mdo != nullptr && _inc_decompile_count_on_failure) { - mdo->inc_decompile_count(); - } - - // All buffers in the CodeBuffer are allocated in the CodeCache. - // If the code buffer is created on each compile attempt - // as in C2, then it must be freed. 
- code_buffer->free_blob(); + if (!is_compilation_valid(THREAD, target, preload, install_code, code_buffer, scc_entry)) { return; } assert(offsets->value(CodeOffsets::Deopt) != -1, "must have deopt entry"); assert(offsets->value(CodeOffsets::Exceptions) != -1, "must have exception entry"); - if (scc_entry == nullptr) { + if (scc_entry == nullptr && !UseNewCode2) { scc_entry = SCCache::store_nmethod(method, compile_id(), entry_bci, @@ -1149,62 +1289,23 @@ void ciEnv::register_method(ciMethod* target, nm->set_has_clinit_barriers(has_clinit_barriers); assert(!method->is_synchronized() || nm->has_monitors(), ""); - if (entry_bci == InvocationEntryBci) { - if (TieredCompilation) { - // If there is an old version we're done with it - nmethod* old = method->code(); - if (TraceMethodReplacement && old != nullptr) { - ResourceMark rm; - char *method_name = method->name_and_sig_as_C_string(); - tty->print_cr("Replacing method %s", method_name); - } - if (old != nullptr) { - old->make_not_used(); - } - } - - LogTarget(Info, nmethod, install) lt; - if (lt.is_enabled()) { - ResourceMark rm; - char *method_name = method->name_and_sig_as_C_string(); - lt.print("Installing method (L%d) %s id=%d scc=%s%s%u", - task()->comp_level(), method_name, compile_id(), - task()->is_scc() ? "A" : "", preload ? "P" : "", - (scc_entry != nullptr ? 
scc_entry->offset() : 0)); - } - // Allow the code to be executed - MutexLocker ml(NMethodState_lock, Mutex::_no_safepoint_check_flag); - if (nm->make_in_use()) { -#ifdef ASSERT - BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); - if (bs_nm != nullptr && bs_nm->supports_entry_barrier(nm)) { - if (!bs_nm->is_armed(nm)) { - log_info(init)("nmethod %d %d not armed", nm->compile_id(), nm->comp_level()); - } - } -#endif // ASSERT - if (preload) { - method->set_preload_code(nm); - } - if (!preload || target->holder()->is_linked()) { - method->set_code(method, nm); - } - } - } else { - LogTarget(Info, nmethod, install) lt; - if (lt.is_enabled()) { - ResourceMark rm; - char *method_name = method->name_and_sig_as_C_string(); - lt.print("Installing osr method (L%d) %s @ %d id=%u scc=%s%u", - task()->comp_level(), method_name, entry_bci, compile_id(), - task()->is_scc() ? "A" : "", - (scc_entry != nullptr ? scc_entry->offset() : 0)); - } - MutexLocker ml(NMethodState_lock, Mutex::_no_safepoint_check_flag); - if (nm->make_in_use()) { - method->method_holder()->add_osr_nmethod(nm); - } + if (scc_entry == nullptr && UseNewCode2) { + scc_entry = SCCache::store_nmethod_v1(nm, + compiler, + dependencies()->size_in_bytes(), + for_preload); + if (scc_entry != nullptr) { + scc_entry->set_inlined_bytecodes(num_inlined_bytecodes()); + if (has_clinit_barriers) { + set_scc_clinit_barriers_entry(scc_entry); // Record it + return; + } else if (!for_preload) { + SCCEntry* previous_entry = scc_clinit_barriers_entry(); + scc_entry->set_next(previous_entry); // Link it for case of deoptimization + } + } } + make_code_usable(THREAD, target, preload, entry_bci, scc_entry, nm); } } diff --git a/src/hotspot/share/ci/ciEnv.hpp b/src/hotspot/share/ci/ciEnv.hpp index 646ad8a041a..aaa12cf6d4b 100644 --- a/src/hotspot/share/ci/ciEnv.hpp +++ b/src/hotspot/share/ci/ciEnv.hpp @@ -40,6 +40,8 @@ class CompileTask; class OopMapSet; class SCCEntry; +class SCCReader; +class 
SCnmethod; // ciEnv // @@ -292,6 +294,12 @@ class ciEnv : StackObj { // Helper routine for determining the validity of a compilation with // respect to method dependencies (e.g. concurrent class loading). void validate_compile_task_dependencies(ciMethod* target); + + // Helper rountimes to factor out common code used by routines that register a method + // i.e. register_aot_method() and register_method() + bool is_compilation_valid(JavaThread* thread, ciMethod* target, bool preload, bool install_code, CodeBuffer* code_buffer, SCCEntry* scc_entry); + void make_code_usable(JavaThread* thread, ciMethod* target, bool preload, int entry_bci, SCCEntry* scc_entry, nmethod* nm); + public: enum { MethodCompilable, @@ -364,6 +372,17 @@ class ciEnv : StackObj { int comp_level(); // task()->comp_level() int compile_id(); // task()->compile_id() + // Register method loaded from AOT code cache + void register_aot_method(ciMethod* target, + AbstractCompiler* compiler, + int entry_bci, + GrowableArray& oop_list, + GrowableArray& metadata_list, + GrowableArray& reloc_imm_oop_list, + GrowableArray& reloc_imm_metadata_list, + SCCReader* scc_reader, + SCnmethod* scnm); + // Register the result of a compilation. 
void register_method(ciMethod* target, int entry_bci, diff --git a/src/hotspot/share/code/SCCache.cpp b/src/hotspot/share/code/SCCache.cpp index ff89e7570be..6d6a93cd5de 100644 --- a/src/hotspot/share/code/SCCache.cpp +++ b/src/hotspot/share/code/SCCache.cpp @@ -776,7 +776,19 @@ SCCache* SCCache::open_for_write() { return nullptr; } -void copy_bytes(const char* from, address to, uint size) { +bool SCCache::is_address_in_aot_cache(address p) { + SCCache* cache = open_for_read(); + if (cache == nullptr) { + return false; + } + if ((p >= (address)cache->cache_buffer()) && + (p < (address)(cache->cache_buffer() + cache->load_size()))) { + return true; + } + return false; +} + +void SCCache::copy_bytes(const char* from, address to, uint size) { assert(size > 0, "sanity"); bool by_words = true; if ((size > 2 * HeapWordSize) && (((intptr_t)from | (intptr_t)to) & (HeapWordSize - 1)) == 0) { @@ -828,6 +840,25 @@ bool SCCache::align_write() { return true; } +// Check to see if AOT code cache has required space to store "nbytes" of data +address SCCache::reserve_bytes(uint nbytes) { + assert(for_write(), "Code Cache file is not created"); + uint new_position = _write_position + nbytes; + if (new_position >= (uint)((char*)_store_entries - _store_buffer)) { + log_warning(scc)("Failed to ensure %d bytes at offset %d to Startup Code Cache file '%s'. 
Increase CachedCodeMaxSize.", + nbytes, _write_position, _cache_path); + set_failed(); + exit_vm_on_store_failure(); + return nullptr; + } + address buffer = (address)(_store_buffer + _write_position); + _write_position += nbytes; + if (_store_size < _write_position) { + _store_size = _write_position; + } + return buffer; +} + uint SCCache::write_bytes(const void* buffer, uint nbytes) { assert(for_write(), "Code Cache file is not created"); if (nbytes == 0) { @@ -1818,7 +1849,7 @@ bool SCCReader::read_relocations(CodeBuffer* buffer, CodeBuffer* orig_buffer, cs->expand_locs(reloc_count); } relocInfo* reloc_start = cs->locs_start(); - copy_bytes(addr(code_offset), (address)reloc_start, reloc_size); + SCCache::copy_bytes(addr(code_offset), (address)reloc_start, reloc_size); code_offset += reloc_size; cs->set_locs_end(reloc_start + reloc_count); cs->set_locs_point(cs->start() + locs_point_off); @@ -1844,7 +1875,8 @@ bool SCCReader::read_relocations(CodeBuffer* buffer, CodeBuffer* orig_buffer, if (r->oop_is_immediate()) { assert(reloc_data[j] == (uint)j, "should be"); methodHandle comp_method(THREAD, target->get_Method()); - jobject jo = read_oop(THREAD, comp_method); + oop obj = read_oop(THREAD, comp_method); + jobject jo = JNIHandles::make_local(THREAD, obj); if (lookup_failed()) { success = false; break; @@ -1982,7 +2014,7 @@ bool SCCReader::read_code(CodeBuffer* buffer, CodeBuffer* orig_buffer, uint code // Load code to new buffer. 
address code_start = cs->start(); - copy_bytes(addr(scc_cs[i]._offset + code_offset), code_start, orig_size_align); + SCCache::copy_bytes(addr(scc_cs[i]._offset + code_offset), code_start, orig_size_align); cs->set_end(code_start + orig_size); } @@ -2355,12 +2387,12 @@ DebugInformationRecorder* SCCReader::read_debug_info(OopRecorder* oop_recorder) assert(sizeof(PcDesc) > DATA_ALIGNMENT, "sanity"); DebugInformationRecorder* recorder = new DebugInformationRecorder(oop_recorder, data_size_align, pcs_length); - copy_bytes(addr(code_offset), recorder->stream()->buffer(), data_size_align); + SCCache::copy_bytes(addr(code_offset), recorder->stream()->buffer(), data_size_align); recorder->stream()->set_position(data_size); code_offset += data_size; uint pcs_size = pcs_length * sizeof(PcDesc); - copy_bytes(addr(code_offset), (address)recorder->pcs(), pcs_size); + SCCache::copy_bytes(addr(code_offset), (address)recorder->pcs(), pcs_size); code_offset += pcs_size; set_read_position(code_offset); return recorder; @@ -2410,12 +2442,12 @@ OopMapSet* SCCReader::read_oop_maps() { CompressedWriteStream* stream = oop_map->write_stream(); // Read data which overwrites default data - copy_bytes(addr(code_offset), (address)oop_map, sizeof(OopMap)); + SCCache::copy_bytes(addr(code_offset), (address)oop_map, sizeof(OopMap)); code_offset += sizeof(OopMap); stream->set_position(data_size); oop_map->set_write_stream(stream); if (data_size > 0) { - copy_bytes(addr(code_offset), (address)(oop_map->data()), (uint)data_size); + SCCache::copy_bytes(addr(code_offset), (address)(oop_map->data()), (uint)data_size); code_offset += data_size; } #ifdef ASSERT @@ -2453,7 +2485,7 @@ bool SCCache::write_oop_maps(OopMapSet* oop_maps) { return true; } -jobject SCCReader::read_oop(JavaThread* thread, const methodHandle& comp_method) { +oop SCCReader::read_oop(JavaThread* thread, const methodHandle& comp_method) { uint code_offset = read_position(); oop obj = nullptr; DataKind kind = 
*(DataKind*)addr(code_offset); @@ -2462,7 +2494,7 @@ jobject SCCReader::read_oop(JavaThread* thread, const methodHandle& comp_method) if (kind == DataKind::Null) { return nullptr; } else if (kind == DataKind::No_Data) { - return (jobject)Universe::non_oop_word(); + return cast_to_oop(Universe::non_oop_word()); } else if (kind == DataKind::Klass || kind == DataKind::Klass_Shared) { Klass* k = read_klass(comp_method, (kind == DataKind::Klass_Shared)); if (k == nullptr) { @@ -2522,7 +2554,7 @@ jobject SCCReader::read_oop(JavaThread* thread, const methodHandle& comp_method) compile_id(), comp_level(), (int)kind); return nullptr; } - return JNIHandles::make_local(thread, obj); + return obj; } bool SCCReader::read_oops(OopRecorder* oop_recorder, ciMethod* target) { @@ -2538,10 +2570,11 @@ bool SCCReader::read_oops(OopRecorder* oop_recorder, ciMethod* target) { VM_ENTRY_MARK; methodHandle comp_method(THREAD, target->get_Method()); for (int i = 1; i < oop_count; i++) { - jobject jo = read_oop(THREAD, comp_method); + oop obj = read_oop(THREAD, comp_method); if (lookup_failed()) { return false; } + jobject jo = JNIHandles::make_local(THREAD, obj); if (oop_recorder->is_real(jo)) { oop_recorder->find_index(jo); } else { @@ -2646,16 +2679,20 @@ bool SCCReader::read_metadata(OopRecorder* oop_recorder, ciMethod* target) { } bool SCCache::write_oop(jobject& jo) { + oop obj = JNIHandles::resolve(jo); + return write_oop(obj); +} + +bool SCCache::write_oop(oop obj) { DataKind kind; uint n = 0; - oop obj = JNIHandles::resolve(jo); - if (jo == nullptr) { + if (obj == nullptr) { kind = DataKind::Null; n = write_bytes(&kind, sizeof(int)); if (n != sizeof(int)) { return false; } - } else if (jo == (jobject)Universe::non_oop_word()) { + } else if (cast_from_oop(obj) == Universe::non_oop_word()) { kind = DataKind::No_Data; n = write_bytes(&kind, sizeof(int)); if (n != sizeof(int)) { @@ -2870,6 +2907,7 @@ bool SCCReader::read_dependencies(Dependencies* dependencies) { bool 
SCCache::load_nmethod(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler, CompLevel comp_level) { TraceTime t1("SC total load time", &_t_totalLoad, enable_timers(), false); CompileTask* task = env->task(); + task->mark_aot_load_start(os::elapsed_counter()); SCCEntry* entry = task->scc_entry(); bool preload = task->preload(); assert(entry != nullptr, "sanity"); @@ -2897,12 +2935,18 @@ bool SCCache::load_nmethod(ciEnv* env, ciMethod* target, int entry_bci, Abstract } SCCReader reader(cache, entry, task); - bool success = reader.compile(env, target, entry_bci, compiler); + bool success = false; + if (UseNewCode2) { + success = reader.compile_nmethod(env, target, entry_bci, compiler); + } else { + success = reader.compile(env, target, entry_bci, compiler); + } if (success) { task->set_num_inlined_bytecodes(entry->num_inlined_bytecodes()); } else { entry->set_load_fail(); } + task->mark_aot_load_finish(os::elapsed_counter()); return success; } @@ -2987,7 +3031,7 @@ bool SCCReader::compile(ciEnv* env, ciMethod* target, int entry_bci, AbstractCom if (exc_table_length > 0) { handler_table.set_length(exc_table_length); uint exc_table_size = handler_table.size_in_bytes(); - copy_bytes(addr(code_offset), (address)handler_table.table(), exc_table_size); + SCCache::copy_bytes(addr(code_offset), (address)handler_table.table(), exc_table_size); code_offset += exc_table_size; } @@ -2999,7 +3043,7 @@ bool SCCReader::compile(ciEnv* env, ciMethod* target, int entry_bci, AbstractCom nul_chk_table.set_size(nul_chk_length); nul_chk_table.set_len(nul_chk_length); uint nul_chk_size = nul_chk_table.size_in_bytes(); - copy_bytes(addr(code_offset), (address)nul_chk_table.data(), nul_chk_size - sizeof(implicit_null_entry)); + SCCache::copy_bytes(addr(code_offset), (address)nul_chk_table.data(), nul_chk_size - sizeof(implicit_null_entry)); code_offset += nul_chk_size; } @@ -3061,6 +3105,248 @@ bool SCCReader::compile(ciEnv* env, ciMethod* target, int entry_bci, AbstractCom 
return success; } +bool SCCReader::read_oop_metadata_list(ciMethod* target, GrowableArray &oop_list, GrowableArray &metadata_list, OopRecorder* oop_recorder) { + VM_ENTRY_MARK + methodHandle comp_method(JavaThread::current(), target->get_Method()); + JavaThread* current = JavaThread::current(); + uint offset = read_position(); + int count = *(int *)addr(offset); + offset += sizeof(int); + set_read_position(offset); + for (int i = 0; i < count; i++) { + oop obj = read_oop(current, comp_method); + if (lookup_failed()) { + return false; + } + oop_list.append(obj); + if (oop_recorder != nullptr) { + jobject jo = JNIHandles::make_local(THREAD, obj); + if (oop_recorder->is_real(jo)) { + oop_recorder->find_index(jo); + } else { + oop_recorder->allocate_oop_index(jo); + } + } + LogStreamHandle(Debug, scc, oops) log; + if (log.is_enabled()) { + log.print("%d: " INTPTR_FORMAT " ", i, p2i(obj)); + if (obj == Universe::non_oop_word()) { + log.print("non-oop word"); + } else if (obj == nullptr) { + log.print("nullptr-oop"); + } else { + obj->print_value_on(&log); + } + log.cr(); + } + } + + offset = read_position(); + count = *(int *)addr(offset); + offset += sizeof(int); + set_read_position(offset); + for (int i = 0; i < count; i++) { + Metadata* m = read_metadata(comp_method); + if (lookup_failed()) { + return false; + } + metadata_list.append(m); + if (oop_recorder != nullptr) { + if (oop_recorder->is_real(m)) { + oop_recorder->find_index(m); + } else { + oop_recorder->allocate_metadata_index(m); + } + } + LogTarget(Debug, scc, metadata) log; + if (log.is_enabled()) { + LogStream ls(log); + ls.print("%d: " INTPTR_FORMAT " ", i, p2i(m)); + if (m == (Metadata*)Universe::non_oop_word()) { + ls.print("non-metadata word"); + } else if (m == nullptr) { + ls.print("nullptr-oop"); + } else { + Metadata::print_value_on_maybe_null(&ls, m); + } + ls.cr(); + } + } + return true; +} + +bool SCCReader::compile_nmethod(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* 
compiler) { + CompileTask* task = env->task(); + SCCEntry *scc_entry = (SCCEntry*)_entry; + bool preload = task->preload(); // Code is preloaded before Java method execution + nmethod* nm = nullptr; + + uint entry_position = scc_entry->offset(); + uint scnm_offset = entry_position + scc_entry->code_offset(); + set_read_position(scnm_offset); + + SCnmethod* scnm = (SCnmethod*)addr(scnm_offset); + + //Read oops and metadata + GrowableArray reloc_immediate_oop_list, oop_list; + GrowableArray reloc_immediate_metadata_list, metadata_list; + + OopRecorder* oop_recorder = new OopRecorder(env->arena()); + env->set_oop_recorder(oop_recorder); + + uint buffer_offset = entry_position + scnm->oop_metadata_offset(); + set_read_position(buffer_offset); + if (!read_oop_metadata_list(target, oop_list, metadata_list, oop_recorder)) { + return false; + } + + buffer_offset = entry_position + scnm->reloc_immediates_offset(); + set_read_position(buffer_offset); + if (!read_oop_metadata_list(target, reloc_immediate_oop_list, reloc_immediate_metadata_list, nullptr)) { + return false; + } + + // Read Dependencies (compressed already) + Dependencies* dependencies = new Dependencies(env); + address dep_content = (address)addr(entry_position + scnm->immutable_data_offset()); + int dep_size = scnm->dependencies_size(); + dependencies->set_content(dep_content, dep_size); + env->set_dependencies(dependencies); + + if (VerifyCachedCode) { + return false; + } + + TraceTime t1("SC total nmethod register time", &_t_totalRegister, enable_timers(), false); + env->register_aot_method(target, + compiler, + entry_bci, + oop_list, + metadata_list, + reloc_immediate_oop_list, + reloc_immediate_metadata_list, + this, + scnm); + bool success = task->is_success(); + if (success) { + scc_entry->set_loaded(); + } + return success; +} + +void SCCReader::apply_relocations(SCnmethod* scnm, nmethod* nm, GrowableArray &oop_list, GrowableArray &metadata_list) { + LogStreamHandle(Info, scc, reloc) log; + uint 
buffer_offset = _entry->offset() + scnm->extra_reloc_offset(); + int count = *(int*)addr(buffer_offset); + buffer_offset += sizeof(int); + if (log.is_enabled()) { + log.print_cr("======== extra relocations count=%d", count); + } + uint* reloc_data = (uint*)addr(buffer_offset); + buffer_offset += (count * sizeof(uint)); + set_read_position(buffer_offset); + + RelocIterator iter(nm); + int j = 0; + + while (iter.next()) { + switch (iter.type()) { + case relocInfo::none: + break; + case relocInfo::oop_type: { + oop_Relocation* r = (oop_Relocation*)iter.reloc(); + if (r->oop_is_immediate()) { + r->set_value(cast_from_oop
(oop_list.at(reloc_data[j]))); + } else { + r->fix_oop_relocation(); + } + break; + } + case relocInfo::metadata_type: { + metadata_Relocation* r = (metadata_Relocation*)iter.reloc(); + Metadata* m; + if (r->metadata_is_immediate()) { + m = metadata_list.at(reloc_data[j]); + } else { + // Get already updated value from nmethod. + int index = r->metadata_index(); + m = nm->metadata_at(index); + } + r->set_value((address)m); + break; + } + case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs. + case relocInfo::opt_virtual_call_type: + case relocInfo::static_call_type: { + address dest = _cache->address_for_id(reloc_data[j]); + if (dest != (address)-1) { + ((CallRelocation*)iter.reloc())->set_destination(dest); + } + break; + } + case relocInfo::trampoline_stub_type: { + address dest = _cache->address_for_id(reloc_data[j]); + if (dest != (address)-1) { + ((trampoline_stub_Relocation*)iter.reloc())->set_destination(dest); + } + break; + } + case relocInfo::static_stub_type: + break; + case relocInfo::runtime_call_type: { + address dest = _cache->address_for_id(reloc_data[j]); + if (dest != (address)-1) { + ((CallRelocation*)iter.reloc())->set_destination(dest); + } + break; + } + case relocInfo::runtime_call_w_cp_type: + fatal("runtime_call_w_cp_type unimplemented"); + //address destination = iter.reloc()->value(); + break; + case relocInfo::external_word_type: { + address target = _cache->address_for_id(reloc_data[j]); + // Add external address to global table + int index = ExternalsRecorder::find_index(target); + // Update index in relocation + Relocation::add_jint(iter.data(), index); + external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc(); + assert(reloc->target() == target, "sanity"); + reloc->set_value(target); // Patch address in the code + //TODO: Is fix_relocation_after_move() required here? 
+ //iter.reloc()->fix_relocation_after_move(orig_buffer, buffer); + break; + } + case relocInfo::internal_word_type: { + internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc(); + r->fix_relocation_after_aot_load(scnm->dumptime_content_start_addr(), nm->content_begin()); + break; + } + case relocInfo::section_word_type: { + section_word_Relocation* r = (section_word_Relocation*)iter.reloc(); + r->fix_relocation_after_aot_load(scnm->dumptime_content_start_addr(), nm->content_begin()); + break; + } + case relocInfo::poll_type: + break; + case relocInfo::poll_return_type: + break; + case relocInfo::post_call_nop_type: + break; + case relocInfo::entry_guard_type: + break; + default: + fatal("relocation %d unimplemented", (int)iter.type()); + break; + } + if (log.is_enabled()) { + iter.print_current_on(&log); + } + j++; + } + assert(j == count, "must be"); +} + // No concurency for writing to cache file because this method is called from // ciEnv::register_method() under MethodCompileQueue_lock and Compile_lock locks. SCCEntry* SCCache::store_nmethod(const methodHandle& method, @@ -3366,6 +3652,460 @@ SCCEntry* SCCache::write_nmethod(const methodHandle& method, return entry; } +SCCEntry* SCCache::store_nmethod_v1(nmethod* nm, AbstractCompiler* compiler, int dependencies_size, bool for_preload) { + if (!CDSConfig::is_dumping_cached_code()) { + return nullptr; // The metadata and heap in the CDS image haven't been finalized yet. + } + if (nm->is_osr_method()) { + return nullptr; // No OSR + } + if (!compiler->is_c1() && !compiler->is_c2()) { + // Only c1 and c2 compilers + return nullptr; + } + int comp_level = nm->comp_level(); + if (comp_level == CompLevel_full_profile) { + // Do not cache C1 compiles with full profile i.e. 
tier3 + return nullptr; + } + assert(comp_level == CompLevel_simple || comp_level == CompLevel_limited_profile || comp_level == CompLevel_full_optimization, "must be"); + + TraceTime t1("SC total store time", &_t_totalStore, enable_timers(), false); + SCCache* cache = open_for_write(); + if (cache == nullptr) { + return nullptr; // Cache file is closed + } + SCCEntry* entry = cache->write_nmethod_v1(nm, dependencies_size, for_preload); + if (entry == nullptr) { + log_info(scc, nmethod)("%d (L%d): nmethod store attempt failed", nm->compile_id(), comp_level); + } + return entry; +} + +uint SCnmethod::compute_flags(nmethod* nm) { + return (nm->has_unsafe_access() ? HAS_UNSAFE_ACCESS : 0) | + (nm->has_method_handle_invokes() ? HAS_MH_INVOKE : 0) | + (nm->has_wide_vectors() ? HAS_WIDE_VECTORS : 0) | + (nm->has_monitors() ? HAS_MONITORS : 0) | + (nm->has_scoped_access() ? HAS_SCOPED_ACCESS : 0) | + (nm->has_clinit_barriers() ? HAS_CLINIT_BARRIERS: 0); +} + +void SCnmethod::init_nmethod_data(nmethod* nm) { + _size = nm->size(); // size covers relocations, content and data regions + _relocation_size = nm->relocation_size(); + _content_size = nm->content_size(); + // "content" holds consts, insts and stubs + _consts_offset = nm->consts_begin() - nm->content_begin(); + // "code" holds insts and stubs + _code_offset = nm->code_begin() - nm->content_begin(); + _stub_offset = nm->stub_begin() - nm->content_begin(); + // "data" holds oops and metadata + _oops_count = nm->oops_end() - nm->oops_begin(); + _metadata_offset= (address)nm->metadata_begin() - nm->data_begin(); + _metadata_count = nm->metadata_end() - nm->metadata_begin(); +#if INCLUDE_JVMCI + assert(nm->jvmci_data_size() == 0, "JVMCI compile is not supported"); + _jvmci_data_offset = nm->jvmci_data_begin() - nm->data_begin(); +#endif + // other CodeBlob data + _frame_complete_offset = nm->frame_complete_offset(); + _frame_size = nm->frame_size(); + + // other nmethod data + _flags = compute_flags(nm); + _entry_offset = 
nm->entry_point() - nm->code_begin();
+  _verified_entry_offset = nm->verified_entry_point() - nm->code_begin();
+  _skipped_instructions_size = nm->skipped_instructions_size();
+
+  // Exception handler and deopt handler are in the stub section
+  _exception_offset = nm->exception_begin() - nm->content_begin();
+  _deopt_handler_offset = nm->deopt_handler_begin() - nm->content_begin();
+  _deopt_mh_handler_offset = (nm->deopt_mh_handler_begin() != nullptr) ? (nm->deopt_mh_handler_begin() - nm->content_begin()) : -1;
+  // unwind_handler is offset from the end of the insts sections
+  _unwind_handler_offset = (nm->unwind_handler_begin() != nullptr) ? nm->insts_end() - nm->unwind_handler_begin() : -1;
+  _orig_pc_offset = nm->orig_pc_offset();
+
+  // immutable data in nmethod
+  _immutable_data_size = nm->immutable_data_size();
+  _nul_chk_table_offset = nm->nul_chk_table_begin() - nm->immutable_data_begin();
+  _handler_table_offset = nm->handler_table_begin() - nm->immutable_data_begin();
+  _scopes_pcs_offset = (address)nm->scopes_pcs_begin() - nm->immutable_data_begin();
+  _scopes_data_offset = nm->scopes_data_begin() - nm->immutable_data_begin();
+#if INCLUDE_JVMCI
+  assert(nm->speculations_size() == 0, "JVMCI compile is not supported");
+  _speculation_offset = nm->speculations_begin() - nm->immutable_data_begin();
+  _speculations_len = 0;
+#endif
+
+  // additional info required for loading back nmethod
+  _dumptime_content_start_addr = nm->content_begin();
+}
+
+bool SCCache::write_oops(nmethod* nm) {
+  for (oop* p = nm->oops_begin(); p < nm->oops_end(); p++) {
+    if (!write_oop(*p)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool SCCache::write_metadata(nmethod* nm) {
+  for (Metadata** p = nm->metadata_begin(); p < nm->metadata_end(); p++) {
+    if (!write_metadata(*p)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool SCCache::write_oop_map_set(nmethod* nm) {
+  ImmutableOopMapSet* oopmaps = nm->oop_maps();
+  uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
+  if (n
!= (uint)oopmaps->nr_of_bytes()) { + return false; + } + return true; +} + +SCCEntry* SCCache::write_nmethod_v1(nmethod* nm, int dependencies_size, bool for_preload) { + assert(!nm->has_clinit_barriers() || _gen_preload_code, "sanity"); + uint comp_id = nm->compile_id(); + uint comp_level = nm->comp_level(); + Method* method = nm->method(); + bool method_in_cds = MetaspaceShared::is_in_shared_metaspace((address)method); + InstanceKlass* holder = method->method_holder(); + bool klass_in_cds = holder->is_shared() && !holder->is_shared_unregistered_class(); + bool builtin_loader = holder->class_loader_data()->is_builtin_class_loader_data(); + if (!builtin_loader) { + ResourceMark rm; + log_info(scc, nmethod)("%d (L%d): Skip method '%s' loaded by custom class loader %s", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), holder->class_loader_data()->loader_name()); + return nullptr; + } + if (for_preload && !(method_in_cds && klass_in_cds)) { + ResourceMark rm; + log_info(scc, nmethod)("%d (L%d): Skip method '%s' for preload: not in CDS", comp_id, (int)comp_level, method->name_and_sig_as_C_string()); + return nullptr; + } + assert(!for_preload || method_in_cds, "sanity"); + _for_preload = for_preload; + _has_clinit_barriers = nm->has_clinit_barriers(); + + if (!align_write()) { + return nullptr; + } + + uint entry_position = _write_position; + + uint decomp = (method->method_data() == nullptr) ? 0 : method->method_data()->decompile_count(); + + // Is this one-step workflow assembly phase? + // In this phase compilation is done based on saved profiling data + // without application run. Ignore decompilation counters in such case. + // Also ignore it for C1 code because it is decompiled unconditionally + // when C2 generated code is published. 
+  bool ignore_decompile = (comp_level == CompLevel_limited_profile) ||
+                          CDSConfig::is_dumping_final_static_archive();
+
+  // Write name
+  uint name_offset = 0;
+  uint name_size = 0;
+  uint hash = 0;
+  uint n;
+  {
+    ResourceMark rm;
+    const char* name = method->name_and_sig_as_C_string();
+    log_info(scc, nmethod)("%d (L%d): Writing nmethod '%s' (comp level: %d, decomp: %d%s%s) to Startup Code Cache '%s'",
+                           comp_id, (int)comp_level, name, comp_level, decomp,
+                           (ignore_decompile ? ", ignore_decomp" : ""),
+                           (nm->has_clinit_barriers() ? ", has clinit barriers" : ""), _cache_path);
+
+    LogStreamHandle(Info, scc, loader) log;
+    if (log.is_enabled()) {
+      oop loader = holder->class_loader();
+      oop domain = holder->protection_domain();
+      log.print("Holder: ");
+      holder->print_value_on(&log);
+      log.print(" loader: ");
+      if (loader == nullptr) {
+        log.print("nullptr");
+      } else {
+        loader->print_value_on(&log);
+      }
+      log.print(" domain: ");
+      if (domain == nullptr) {
+        log.print("nullptr");
+      } else {
+        domain->print_value_on(&log);
+      }
+      log.cr();
+    }
+    name_offset = _write_position - entry_position;
+    name_size = (uint)strlen(name) + 1; // Includes '\0'
+    n = write_bytes(name, name_size);
+    if (n != name_size) {
+      return nullptr;
+    }
+    hash = java_lang_String::hash_code((const jbyte*)name, (int)strlen(name));
+  }
+
+  if (!align_write()) {
+    return nullptr;
+  }
+
+  uint scnm_offset = _write_position - entry_position;
+  SCnmethod* scnm = (SCnmethod*)reserve_bytes(sizeof(SCnmethod));
+  if (scnm == nullptr) {
+    return nullptr;
+  }
+  scnm->init_nmethod_data(nm);
+  scnm->set_dependencies_size(dependencies_size);
+
+  if (!align_write()) {
+    return nullptr;
+  }
+
+  scnm->set_relocation_data_offset(_write_position - entry_position);
+  n = write_bytes(nm->relocation_begin(), nm->relocation_size());
+  if (n != (uint)nm->relocation_size()) {
+    return nullptr;
+  }
+  scnm->set_content_offset(_write_position - entry_position);
+  n = write_bytes(nm->content_begin(), nm->content_size());
+ if (n != (uint)nm->content_size()) { + return nullptr; + } + if (!align_write()) { + return nullptr; + } + + scnm->set_oop_metadata_offset(_write_position - entry_position); + int count = scnm->oops_count(); + if (!write_bytes(&count, sizeof(int))) { + return nullptr; + } + // Write oops and metadata in the nmethod's data region + if (!write_oops(nm)) { + if (lookup_failed() && !failed()) { + // Skip this method and reposition file + set_write_position(entry_position); + } + return nullptr; + } + count = scnm->metadata_count(); + if (!write_bytes(&count, sizeof(int))) { + return nullptr; + } + if (!write_metadata(nm)) { + if (lookup_failed() && !failed()) { + // Skip this method and reposition file + set_write_position(entry_position); + } + return nullptr; + } + if (!align_write()) { + return nullptr; + } + scnm->set_oop_map_offset(_write_position - entry_position); + if (!write_oop_map_set(nm)) { + return nullptr; + } + if (!align_write()) { + return nullptr; + } + scnm->set_immutable_data_offset(_write_position - entry_position); + n = write_bytes(nm->immutable_data_begin(), nm->immutable_data_size()); + if (n != (uint)nm->immutable_data_size()) { + return nullptr; + } + if (!align_write()) { + return nullptr; + } + + if (!write_nmethod_extra_relocations(scnm, nm, entry_position)) { + return nullptr; + } + + uint entry_size = _write_position - entry_position; + SCCEntry* entry = new (this) SCCEntry(entry_position, entry_size, name_offset, name_size, + scnm_offset, 0, 0, 0, + SCCEntry::Code, hash, comp_level, comp_id, decomp, + nm->has_clinit_barriers(), for_preload, ignore_decompile); + if (method_in_cds) { + entry->set_method(method); + } +#ifdef ASSERT + if (nm->has_clinit_barriers() || for_preload) { + assert(for_preload, "sanity"); + assert(entry->method() != nullptr, "sanity"); + } +#endif + { + ResourceMark rm; + const char* name = nm->method()->name_and_sig_as_C_string(); + log_info(scc, nmethod)("%d (L%d): Wrote nmethod '%s'%s to Startup Code Cache 
'%s'",
+                         comp_id, (int)comp_level, name, (for_preload ? " (for preload)" : ""), _cache_path);
+  }
+  if (VerifyCachedCode) {
+    return nullptr;
+  }
+  return entry;
+}
+
+bool SCCache::write_nmethod_reloc_immediates(nmethod* nm, GrowableArray<oop>& oop_list, GrowableArray<Metadata*>& metadata_list) {
+  int count = oop_list.length();
+  if (!write_bytes(&count, sizeof(int))) {
+    return false;
+  }
+  for (GrowableArrayIterator<oop> iter = oop_list.begin();
+       iter != oop_list.end(); ++iter) {
+    oop obj = *iter;
+    if (!write_oop(obj)) {
+      return false;
+    }
+  }
+
+  count = metadata_list.length();
+  if (!write_bytes(&count, sizeof(int))) {
+    return false;
+  }
+  for (GrowableArrayIterator<Metadata*> iter = metadata_list.begin();
+       iter != metadata_list.end(); ++iter) {
+    Metadata* m = *iter;
+    if (!write_metadata(m)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool SCCache::write_nmethod_extra_relocations(SCnmethod* scnm, nmethod* nm, int entry_position) {
+  GrowableArray<oop> oop_list;
+  GrowableArray<Metadata*> metadata_list;
+
+  nm->create_reloc_immediates_list(oop_list, metadata_list);
+  scnm->set_reloc_immediates_offset(_write_position - entry_position);
+  if (!write_nmethod_reloc_immediates(nm, oop_list, metadata_list)) {
+    if (lookup_failed() && !failed()) {
+      // Skip this method and reposition file
+      set_write_position(entry_position);
+    }
+    return false;
+  }
+
+  scnm->set_extra_reloc_offset(_write_position - entry_position);
+  LogStreamHandle(Info, scc, reloc) log;
+  GrowableArray<uint> reloc_data;
+  // Collect additional data
+  RelocIterator iter(nm);
+  bool has_immediate = false;
+  while (iter.next()) {
+    int idx = reloc_data.append(0); // default value
+    switch (iter.type()) {
+      case relocInfo::none:
+        break;
+      case relocInfo::oop_type: {
+        oop_Relocation* r = (oop_Relocation*)iter.reloc();
+        if (r->oop_is_immediate()) {
+          // store index of oop in the reloc immediate oop list
+          int oop_idx = oop_list.find(r->oop_value());
+          assert(oop_idx != -1, "sanity check");
+          reloc_data.at_put(idx, (uint)oop_idx);
+
has_immediate = true; + } + break; + } + case relocInfo::metadata_type: { + metadata_Relocation* r = (metadata_Relocation*)iter.reloc(); + if (r->metadata_is_immediate()) { + // store index of metadata in the reloc immediate metadata list + int metadata_idx = metadata_list.find(r->metadata_value()); + assert(metadata_idx != -1, "sanity check"); + reloc_data.at_put(idx, (uint)metadata_idx); + has_immediate = true; + } + break; + } + case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs. + case relocInfo::opt_virtual_call_type: + case relocInfo::static_call_type: { + CallRelocation* r = (CallRelocation*)iter.reloc(); + address dest = r->destination(); + if (dest == r->addr()) { // possible call via trampoline on Aarch64 + dest = (address)-1; // do nothing in this case when loading this relocation + } + reloc_data.at_put(idx, _table->id_for_address(dest, iter, nullptr)); + break; + } + case relocInfo::trampoline_stub_type: { + address dest = ((trampoline_stub_Relocation*)iter.reloc())->destination(); + reloc_data.at_put(idx, _table->id_for_address(dest, iter, nullptr)); + break; + } + case relocInfo::static_stub_type: + break; + case relocInfo::runtime_call_type: { + // Record offset of runtime destination + CallRelocation* r = (CallRelocation*)iter.reloc(); + address dest = r->destination(); + if (dest == r->addr()) { // possible call via trampoline on Aarch64 + dest = (address)-1; // do nothing in this case when loading this relocation + } + reloc_data.at_put(idx, _table->id_for_address(dest, iter, nullptr)); + break; + } + case relocInfo::runtime_call_w_cp_type: + fatal("runtime_call_w_cp_type unimplemented"); + break; + case relocInfo::external_word_type: { + // Record offset of runtime target + address target = ((external_word_Relocation*)iter.reloc())->target(); + reloc_data.at_put(idx, _table->id_for_address(target, iter, nullptr)); + break; + } + case relocInfo::internal_word_type: + break; + case relocInfo::section_word_type: 
+ break; + case relocInfo::poll_type: + break; + case relocInfo::poll_return_type: + break; + case relocInfo::post_call_nop_type: + break; + case relocInfo::entry_guard_type: + break; + default: + fatal("relocation %d unimplemented", (int)iter.type()); + break; + } + if (log.is_enabled()) { + iter.print_current_on(&log); + } + } + + // Write additional relocation data: uint per relocation + // Write the count first + int count = reloc_data.length(); + write_bytes(&count, sizeof(int)); + uint data_size = count * sizeof(uint); + for (GrowableArrayIterator iter = reloc_data.begin(); + iter != reloc_data.end(); ++iter) { + uint value = *iter; + int n = write_bytes(&value, sizeof(uint)); + if (n != sizeof(uint)) { + return false; + break; + } + } + + if (!align_write()) { + return false; + } + return true; //success; +} + static void print_helper1(outputStream* st, const char* name, int count) { if (count > 0) { st->print(" %s=%d", name, count); diff --git a/src/hotspot/share/code/SCCache.hpp b/src/hotspot/share/code/SCCache.hpp index d818a007d16..f263718551d 100644 --- a/src/hotspot/share/code/SCCache.hpp +++ b/src/hotspot/share/code/SCCache.hpp @@ -322,6 +322,153 @@ enum class DataKind: int { MH_Oop_Shared = 11 }; +// Store necessary nmethod information during dump time +// Format: +// SCnmethod +// Relocations +// Contents +// Constants +// Instructions +// Stubs +// Data +// Oops +// Metadata +// ImmutableOopMapSet +// ImmutableData +// Dependencies +// NullCheck table +// ExceptionHandler table +// Reloc Immediate Oops +// Reloc Immediate Metadata +// Extra Relocation Data +struct SCnmethod { + private: + int _size; // total size of nmethod in code cache in bytes + int _relocation_size; // size of relocation (could be bigger than 64Kb) + + int _content_size; + int _consts_offset; // offset in content region where constants region begins + int _code_offset; // offset in content region where instructions region begins (this includes insts, stubs) + int _stub_offset; 
// offset in content region where stubs begin
+
+  int _oops_count;
+  int _metadata_offset; // offset in data region where metadata begins
+  int _metadata_count;
+#if INCLUDE_JVMCI
+  int _jvmci_data_offset;
+#endif
+
+  // Misc data from CodeBlob
+  int _frame_complete_offset;
+  int _frame_size;
+
+  // Misc data from nmethod
+  uint _flags;
+  uint16_t _entry_offset; // entry point with class check; offset in code region
+  uint16_t _verified_entry_offset; // entry point without class check; offset in code region
+  int _skipped_instructions_size;
+  int _exception_offset;
+  int _deopt_handler_offset;
+  int _deopt_mh_handler_offset;
+  int16_t _unwind_handler_offset;
+
+  int _immutable_data_size;
+  int _dependencies_size;
+  int _nul_chk_table_offset; // offset in immutable region where nullcheck table begins
+  int _handler_table_offset; // offset in immutable region where exception handler table begins
+  int _scopes_pcs_offset; // offset in immutable region
+  int _scopes_data_offset; // offset in immutable region
+#if INCLUDE_JVMCI
+  int _speculation_offset;
+  int _speculations_len;
+#endif
+
+  int _orig_pc_offset;
+
+  address _dumptime_content_start_addr;
+
+  uint _relocation_data_offset;
+  uint _content_offset;
+  uint _oop_metadata_offset;
+  uint _oop_map_offset;
+  uint _immutable_data_offset;
+  uint _reloc_immediates_offset;
+  uint _extra_reloc_offset;
+
+  enum {
+    HAS_UNSAFE_ACCESS=0x1,
+    HAS_MH_INVOKE=0x2,
+    HAS_WIDE_VECTORS=0x4,
+    HAS_MONITORS=0x8,
+    HAS_SCOPED_ACCESS=0x10,
+    HAS_CLINIT_BARRIERS=0x20
+  };
+
+ public:
+  void init_nmethod_data(nmethod* nm);
+  void set_relocation_data_offset(int offset) { _relocation_data_offset = offset; }
+  void set_content_offset(int offset) { _content_offset = offset; }
+  void set_oop_metadata_offset(int offset) { _oop_metadata_offset = offset; }
+  void set_oop_map_offset(int offset) { _oop_map_offset = offset; }
+  void set_immutable_data_offset(int offset) { _immutable_data_offset = offset; }
+  void set_reloc_immediates_offset(int
offset) { _reloc_immediates_offset = offset; } + void set_extra_reloc_offset(int offset) { _extra_reloc_offset = offset; } + void set_dependencies_size(int size) { _dependencies_size = size; } + + int relocation_size() const { return _relocation_size; } + int content_size() const { return _content_size; } + int code_offset() const { return _code_offset; } + int stub_offset() const { return _stub_offset; } + int metadata_offset() const { return _metadata_offset; } +#if INCLUDE_JVMCI + int jvmci_data_offset() const { return _jvmci_data_offset; } +#endif + int oops_count() const { return _oops_count; } + int metadata_count() const { return _metadata_count; } + + int frame_complete_offset() const { return _frame_complete_offset; } + int frame_size() const { return _frame_size; } + + uint compute_flags(nmethod* nm); + bool has_unsafe_access() const { return _flags & HAS_UNSAFE_ACCESS; } + bool has_method_handle_invokes() const { return _flags & HAS_MH_INVOKE; } + bool has_wide_vectors() const { return _flags & HAS_WIDE_VECTORS; } + bool has_monitors() const { return _flags & HAS_MONITORS; } + bool has_scoped_access() const { return _flags & HAS_SCOPED_ACCESS; } + bool has_clinit_barriers() const { return _flags & HAS_CLINIT_BARRIERS; } + + uint16_t entry_offset() const { return _entry_offset; } + uint16_t verified_entry_offset() const { return _verified_entry_offset; } + int skipped_instructions_size() const { return _skipped_instructions_size; } + int exception_offset() const { return _exception_offset; } + int deopt_handler_offset() const { return _deopt_handler_offset; } + int deopt_mh_handler_offset() const { return _deopt_mh_handler_offset; } + int16_t unwind_handler_offset() const { return _unwind_handler_offset; } + + int immutable_data_size() const { return _immutable_data_size; } + int dependencies_size() const { return _dependencies_size; } + int nul_chk_table_offset() const { return _nul_chk_table_offset; } + int handler_table_offset() const { return 
_handler_table_offset; } + int scopes_pcs_offset() const { return _scopes_pcs_offset; } + int scopes_data_offset() const { return _scopes_data_offset; } +#if INCLUDE_JVMCI + int speculation_offset() const { return _speculation_offset; } + int speculations_len() const { return _speculations_len; } +#endif + + int orig_pc_offset() const { return _orig_pc_offset; } + + address dumptime_content_start_addr() const { return _dumptime_content_start_addr; } + + uint relocation_data_offset() const { return _relocation_data_offset; } + uint content_offset() const { return _content_offset; } + uint oop_metadata_offset() const { return _oop_metadata_offset; } + uint oop_map_offset() const { return _oop_map_offset; } + uint immutable_data_offset() const { return _immutable_data_offset; } + uint reloc_immediates_offset() const { return _reloc_immediates_offset; } + uint extra_reloc_offset() const { return _extra_reloc_offset; } +}; + class SCCache; class SCCReader { // Concurent per compilation request @@ -348,7 +495,12 @@ class SCCReader { // Concurent per compilation request public: SCCReader(SCCache* cache, SCCEntry* entry, CompileTask* task); + SCCEntry* scc_entry() { return (SCCEntry*)_entry; } + + // convenience method to convert offset in SCCEntry data to its address + const char* addr_of_entry_offset(uint offset_in_entry) const { return addr(_entry->offset() + offset_in_entry); } bool compile(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler); + bool compile_nmethod(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler); bool compile_blob(CodeBuffer* buffer, int* pc_offset); Klass* read_klass(const methodHandle& comp_method, bool shared); @@ -360,11 +512,14 @@ class SCCReader { // Concurent per compilation request OopMapSet* read_oop_maps(); bool read_dependencies(Dependencies* dependencies); - jobject read_oop(JavaThread* thread, const methodHandle& comp_method); + oop read_oop(JavaThread* thread, const methodHandle& comp_method); 
Metadata* read_metadata(const methodHandle& comp_method); bool read_oops(OopRecorder* oop_recorder, ciMethod* target); bool read_metadata(OopRecorder* oop_recorder, ciMethod* target); + bool read_oop_metadata_list(ciMethod* target, GrowableArray &oop_list, GrowableArray &metadata_list, OopRecorder* oop_recorder); + void apply_relocations(SCnmethod* scnm, nmethod* nm, GrowableArray &oop_list, GrowableArray &metadata_list); + void print_on(outputStream* st); }; @@ -415,6 +570,8 @@ class SCCache : public CHeapObj { void clear_lookup_failed() { _lookup_failed = false; } bool lookup_failed() const { return _lookup_failed; } + address reserve_bytes(uint nbytes); + SCCEntry* write_nmethod(const methodHandle& method, int compile_id, int entry_bci, @@ -436,6 +593,8 @@ class SCCache : public CHeapObj { bool has_monitors, bool has_scoped_access); + SCCEntry* write_nmethod_v1(nmethod* nm, int dependencies_size, bool for_preload); + // States: // S >= 0: allow new readers, S readers are currently active // S < 0: no new readers are allowed; (-S-1) readers are currently active @@ -464,6 +623,9 @@ class SCCache : public CHeapObj { bool failed() const { return _failed; } void set_failed() { _failed = true; } + static void copy_bytes(const char* from, address to, uint size); + static bool is_address_in_aot_cache(address p); + uint load_size() const { return _load_size; } uint write_position() const { return _write_position; } @@ -507,15 +669,22 @@ class SCCache : public CHeapObj { bool write_debug_info(DebugInformationRecorder* recorder); bool write_oop_maps(OopMapSet* oop_maps); + bool write_oop_map_set(nmethod* nm); + bool write_nmethod_reloc_immediates(nmethod* nm, GrowableArray& oop_list, GrowableArray& metadata_list); + bool write_nmethod_extra_relocations(SCnmethod* scnm, nmethod* nm, int entry_position); + jobject read_oop(JavaThread* thread, const methodHandle& comp_method); Metadata* read_metadata(const methodHandle& comp_method); bool read_oops(OopRecorder* oop_recorder, 
ciMethod* target); bool read_metadata(OopRecorder* oop_recorder, ciMethod* target); bool write_oop(jobject& jo); + bool write_oop(oop obj); bool write_oops(OopRecorder* oop_recorder); bool write_metadata(Metadata* m); bool write_metadata(OopRecorder* oop_recorder); + bool write_oops(nmethod* nm); + bool write_metadata(nmethod* nm); static bool load_exception_blob(CodeBuffer* buffer, int* pc_offset); static bool store_exception_blob(CodeBuffer* buffer, int pc_offset); @@ -543,6 +712,8 @@ class SCCache : public CHeapObj { bool has_monitors, bool has_scoped_access); + static SCCEntry* store_nmethod_v1(nmethod* nm, AbstractCompiler* compiler, int dependencies_size, bool for_preload); + static uint store_entries_cnt() { if (is_on_for_write()) { return cache()->_store_entries_cnt; diff --git a/src/hotspot/share/code/codeBlob.cpp b/src/hotspot/share/code/codeBlob.cpp index 23f621ffec8..41a82491eeb 100644 --- a/src/hotspot/share/code/codeBlob.cpp +++ b/src/hotspot/share/code/codeBlob.cpp @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "code/SCCache.hpp" #include "code/codeBlob.hpp" #include "code/codeCache.hpp" #include "code/relocInfo.hpp" @@ -73,6 +74,18 @@ unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) { return size; } +// This must be consistent with the CodeBlob constructor's layout actions. 
+unsigned int CodeBlob::allocation_size(SCnmethod* scnm, int header_size) { + unsigned int size = header_size; + size += align_up(scnm->relocation_size(), oopSize); + // align the size to CodeEntryAlignment + size = align_code_offset(size); + size += align_up(scnm->content_size(), oopSize); + size += align_up(scnm->oops_count() * sizeof(oop*), oopSize); + size += align_up(scnm->metadata_count() * sizeof(Metadata*), oopSize); + return size; +} + CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size, int16_t frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) : _oop_maps(nullptr), // will be set by set_oop_maps() call @@ -102,6 +115,31 @@ CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size set_oop_maps(oop_maps); } +CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, SCnmethod* scnm, int size, uint16_t header_size) : + _oop_maps(nullptr), // will be set later when oop maps are read from AOT code cache + _name(name), + _size(size), + _relocation_size(align_up(scnm->relocation_size(), oopSize)), + _content_offset(CodeBlob::align_code_offset(header_size + _relocation_size)), + _code_offset(_content_offset + scnm->code_offset()), + _data_offset(_content_offset + scnm->content_size()), + _frame_size(scnm->frame_size()), + _header_size(header_size), + _frame_complete_offset(scnm->frame_complete_offset()), + _kind(kind), + _caller_must_gc_arguments(false) +{ + assert(is_aligned(_size, oopSize), "unaligned size"); + assert(is_aligned(header_size, oopSize), "unaligned size"); + assert(is_aligned(_relocation_size, oopSize), "unaligned size"); + assert(_data_offset <= _size, "codeBlob is too small: %d > %d", _data_offset, _size); + assert(code_end() == content_end(), "must be the same - see code_end()"); +#ifdef COMPILER1 + // probably wrong for tiered + assert(_frame_size > -1, "must use frame size"); +#endif // COMPILER1 +} + // Simple CodeBlob used for 
simple BufferBlob. CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) : _oop_maps(nullptr), @@ -123,7 +161,7 @@ CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t heade } void CodeBlob::purge() { - if (_oop_maps != nullptr) { + if (_oop_maps != nullptr && !SCCache::is_address_in_aot_cache((address)_oop_maps)) { delete _oop_maps; _oop_maps = nullptr; } diff --git a/src/hotspot/share/code/codeBlob.hpp b/src/hotspot/share/code/codeBlob.hpp index 8ecd9e21537..1efdaa3fc89 100644 --- a/src/hotspot/share/code/codeBlob.hpp +++ b/src/hotspot/share/code/codeBlob.hpp @@ -38,6 +38,7 @@ class ImmutableOopMap; class ImmutableOopMapSet; class JNIHandleBlock; class OopMapSet; +class SCnmethod; // CodeBlob Types // Used in the CodeCache to assign CodeBlobs to different CodeHeaps @@ -132,6 +133,8 @@ class CodeBlob { CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size, int16_t frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments); + CodeBlob(const char* name, CodeBlobKind kind, SCnmethod* scnmethod, int size, uint16_t header_size); + // Simple CodeBlob used for simple BufferBlob. 
CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size); @@ -145,6 +148,7 @@ class CodeBlob { // Returns the space needed for CodeBlob static unsigned int allocation_size(CodeBuffer* cb, int header_size); + static unsigned int allocation_size(SCnmethod* scnm, int header_size); static unsigned int align_code_offset(int offset); // Deletion @@ -218,6 +222,7 @@ class CodeBlob { // OopMap for frame ImmutableOopMapSet* oop_maps() const { return _oop_maps; } void set_oop_maps(OopMapSet* p); + void set_oop_maps(ImmutableOopMapSet* p) { _oop_maps = p; } const ImmutableOopMap* oop_map_for_slot(int slot, address return_address) const; const ImmutableOopMap* oop_map_for_return_address(address return_address) const; diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp index ada8d0675cf..90acc79f33c 100644 --- a/src/hotspot/share/code/nmethod.cpp +++ b/src/hotspot/share/code/nmethod.cpp @@ -1123,6 +1123,31 @@ nmethod* nmethod::new_native_nmethod(const methodHandle& method, return nm; } +void nmethod::record_nmethod_dependency() { + // To make dependency checking during class loading fast, record + // the nmethod dependencies in the classes it is dependent on. + // This allows the dependency checking code to simply walk the + // class hierarchy above the loaded class, checking only nmethods + // which are dependent on those classes. The slow way is to + // check every nmethod for dependencies which makes it linear in + // the number of methods compiled. For applications with a lot + // classes the slow way is too slow. + for (Dependencies::DepStream deps(this); deps.next(); ) { + if (deps.type() == Dependencies::call_site_target_value) { + // CallSite dependencies are managed on per-CallSite instance basis. 
+ oop call_site = deps.argument_oop(0); + MethodHandles::add_dependent_nmethod(call_site, this); + } else { + InstanceKlass* ik = deps.context_type(); + if (ik == nullptr) { + continue; // ignore things like evol_method + } + // record this nmethod as dependent on this klass + ik->add_dependent_nmethod(this); + } + } +} + nmethod* nmethod::new_nmethod(const methodHandle& method, int compile_id, int entry_bci, @@ -1190,29 +1215,8 @@ nmethod* nmethod::new_nmethod(const methodHandle& method, ); if (nm != nullptr) { - // To make dependency checking during class loading fast, record - // the nmethod dependencies in the classes it is dependent on. - // This allows the dependency checking code to simply walk the - // class hierarchy above the loaded class, checking only nmethods - // which are dependent on those classes. The slow way is to - // check every nmethod for dependencies which makes it linear in - // the number of methods compiled. For applications with a lot - // classes the slow way is too slow. - for (Dependencies::DepStream deps(nm); deps.next(); ) { - if (deps.type() == Dependencies::call_site_target_value) { - // CallSite dependencies are managed on per-CallSite instance basis. - oop call_site = deps.argument_oop(0); - MethodHandles::add_dependent_nmethod(call_site, nm); - } else { - InstanceKlass* ik = deps.context_type(); - if (ik == nullptr) { - continue; // ignore things like evol_method - } - // record this nmethod as dependent on this klass - ik->add_dependent_nmethod(nm); - } - } - NOT_PRODUCT(if (nm != nullptr) note_java_nmethod(nm)); + nm->record_nmethod_dependency(); + NOT_PRODUCT(note_java_nmethod(nm)); } } // Do verification and logging outside CodeCache_lock. 
@@ -1236,6 +1240,164 @@ nmethod* nmethod::new_nmethod(const methodHandle& method, return nm; } +nmethod* nmethod::new_nmethod(const methodHandle& method, + AbstractCompiler* compiler, + int compile_id, + CompLevel comp_level, + int entry_bci, + bool preload, + GrowableArray<oop>& oop_list, + GrowableArray<Metadata*>& metadata_list, + GrowableArray<oop>& reloc_imm_oop_list, + GrowableArray<Metadata*>& reloc_imm_metadata_list, + SCCReader* scc_reader, + SCnmethod* scnm) +{ + nmethod* nm = nullptr; + int nmethod_size = CodeBlob::allocation_size(scnm, sizeof(nmethod)); + // create nmethod + { + MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + nm = new (nmethod_size, comp_level) + nmethod(method(), + nmethod_size, + compiler, + compile_id, + comp_level, + entry_bci, + preload, + oop_list, + metadata_list, + reloc_imm_oop_list, + reloc_imm_metadata_list, + scc_reader, + scnm); + if (nm != nullptr) { + nm->record_nmethod_dependency(); + NOT_PRODUCT(note_java_nmethod(nm)); + } + } + // Do verification and logging outside CodeCache_lock. + if (nm != nullptr) { +#ifdef ASSERT + LogTarget(Debug, scc, nmethod) log; + if (log.is_enabled()) { + LogStream out(log); + out.print_cr("== new_nmethod 2"); + FlagSetting fs(PrintRelocations, true); + nm->print(&out); + nm->decode(&out); + } +#endif + // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet. 
+ DEBUG_ONLY(nm->verify();) + nm->log_new_nmethod(); + } + return nm; +} + +nmethod::nmethod(Method* method, + int nmethod_size, + AbstractCompiler* compiler, + int compile_id, + CompLevel comp_level, + int entry_bci, + bool preload, + GrowableArray<oop>& oop_list, + GrowableArray<Metadata*>& metadata_list, + GrowableArray<oop>& reloc_imm_oop_list, + GrowableArray<Metadata*>& reloc_imm_metadata_list, + SCCReader* scc_reader, + SCnmethod* scnm) + : CodeBlob("nmethod", CodeBlobKind::Nmethod, scnm, nmethod_size, sizeof(nmethod)), + _deoptimization_generation(0), + _gc_epoch(CodeCache::gc_epoch()), + _method(method), + _osr_link(nullptr) +{ + _exception_cache = nullptr; + _gc_data = nullptr; + _oops_do_mark_link = nullptr; + _compiled_ic_data = nullptr; // set in post_init + _is_unloading_state = 0; + _state = not_installed; + _osr_entry_point = nullptr; // OSR methods are not in AOT cache + _entry_offset = scnm->entry_offset(); + _verified_entry_offset = scnm->verified_entry_offset(); + _entry_bci = entry_bci; + + _immutable_data_size = scnm->immutable_data_size(); + _skipped_instructions_size = scnm->skipped_instructions_size(); + _stub_offset = content_offset() + scnm->stub_offset(); + _exception_offset = content_offset() + scnm->exception_offset(); + _deopt_handler_offset = content_offset() + scnm->deopt_handler_offset(); + _deopt_mh_handler_offset = content_offset() + scnm->deopt_mh_handler_offset(); + _unwind_handler_offset = scnm->unwind_handler_offset(); + + _num_stack_arg_slots = entry_bci != InvocationEntryBci ? 
0 : _method->constMethod()->num_stack_arg_slots(); + + _metadata_offset = scnm->metadata_offset(); +#if INCLUDE_JVMCI + _jvmci_data_offset = scnm->jvmci_data_offset(); +#endif + + _nul_chk_table_offset = scnm->nul_chk_table_offset(); + _handler_table_offset = scnm->handler_table_offset(); + _scopes_pcs_offset = scnm->scopes_pcs_offset(); + _scopes_data_offset = scnm->scopes_data_offset(); +#if INCLUDE_JVMCI + _speculations_offset = scnm->speculation_offset(); + DEBUG_ONLY( int immutable_data_end_offset = _speculations_offset + align_up(scnm->speculations_len(), oopSize); ) +#else + DEBUG_ONLY( int immutable_data_end_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize); ) +#endif + assert(immutable_data_end_offset <= _immutable_data_size, "wrong read-only data size: %d > %d", + immutable_data_end_offset, _immutable_data_size); + + _orig_pc_offset = scnm->orig_pc_offset(); + _compile_id = compile_id; + _comp_level = comp_level; + _compiler_type = compiler->type(); + + set_has_unsafe_access(scnm->has_unsafe_access()); + set_has_method_handle_invokes(scnm->has_method_handle_invokes()); + set_has_wide_vectors(scnm->has_wide_vectors()); + set_has_monitors(scnm->has_monitors()); + set_has_scoped_access(scnm->has_scoped_access()); + set_has_clinit_barriers(scnm->has_clinit_barriers()); + + set_preloaded(preload); + + SCCEntry* scc_entry = scc_reader->scc_entry(); + const char* reloc_addr = scc_reader->addr_of_entry_offset(scnm->relocation_data_offset()); + SCCache::copy_bytes(reloc_addr, (address)relocation_begin(), scnm->relocation_size()); + const char* content_addr = scc_reader->addr_of_entry_offset(scnm->content_offset()); + SCCache::copy_bytes(content_addr, content_begin(), scnm->content_size()); + + ImmutableOopMapSet* oop_map_set = (ImmutableOopMapSet*)scc_reader->addr_of_entry_offset(scnm->oop_map_offset()); + set_oop_maps(oop_map_set); + address immutable_data = (address)scc_reader->addr_of_entry_offset(scnm->immutable_data_offset()); + 
set_immutable_data(immutable_data); + + copy_values(&oop_list); + copy_values(&metadata_list); + + scc_reader->apply_relocations(scnm, this, reloc_imm_oop_list, reloc_imm_metadata_list); + + // Create cache after PcDesc data is copied - it will be used to initialize cache + _pc_desc_container = new PcDescContainer(scopes_pcs_begin()); + + _scc_entry = scc_entry; + + post_init(); + + // we use the information of entry points to find out if a method is + // static or non static + assert(compiler->is_c2() || compiler->is_jvmci() || + _method->is_static() == (entry_point() == verified_entry_point()), + " entry points must be same for static methods and vice versa"); +} + // Fill in default values for various fields void nmethod::init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets) { // avoid uninitialized fields, even for short time periods @@ -1755,6 +1917,14 @@ inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) { } } +void nmethod::copy_values(GrowableArray<oop>* array) { + int length = array->length(); + assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough"); + oop* dest = oops_begin(); + for (int index = 0 ; index < length; index++) { + dest[index] = array->at(index); + } +} // Have to have the same name because it's called by a template void nmethod::copy_values(GrowableArray<jobject>* array) { @@ -1802,6 +1972,25 @@ void nmethod::fix_oop_relocations(address begin, address end, bool initialize_im } } +void nmethod::create_reloc_immediates_list(GrowableArray<oop>& oop_list, GrowableArray<Metadata*>& metadata_list) { + RelocIterator iter(this); + while (iter.next()) { + if (iter.type() == relocInfo::oop_type) { + oop_Relocation* reloc = iter.oop_reloc(); + if (reloc->oop_is_immediate()) { + oop dest = reloc->oop_value(); + oop_list.append(dest); + } + } else if (iter.type() == relocInfo::metadata_type) { + metadata_Relocation* reloc = iter.metadata_reloc(); + if (reloc->metadata_is_immediate()) { + Metadata* m = reloc->metadata_value(); + 
metadata_list.append(m); + } + } + } +} + static void install_post_call_nop_displacement(nmethod* nm, address pc) { NativePostCallNop* nop = nativePostCallNop_at((address) pc); intptr_t cbaddr = (intptr_t) nm; @@ -2166,9 +2355,11 @@ void nmethod::purge(bool unregister_nmethod) { if (_pc_desc_container != nullptr) { delete _pc_desc_container; } - delete[] _compiled_ic_data; + if (_compiled_ic_data != nullptr) { + delete[] _compiled_ic_data; + } - if (_immutable_data != data_end()) { + if (_immutable_data != data_end() && !SCCache::is_address_in_aot_cache((address)_oop_maps)) { os::free(_immutable_data); _immutable_data = data_end(); // Valid not null address } @@ -2962,35 +3153,39 @@ void nmethod::verify() { fatal("find_nmethod did not find this nmethod (" INTPTR_FORMAT ")", p2i(this)); } - for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) { - if (! p->verify(this)) { - tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", p2i(this)); + // Verification can triggered during shutdown after SCCache is closed. + // If the Scopes data is in the SCCache, then we should avoid verification during shutdown. + if (!UseNewCode2 || (!is_scc() || SCCache::is_on())) { + for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) { + if (! 
p->verify(this)) { + tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", p2i(this)); + } } - } #ifdef ASSERT #if INCLUDE_JVMCI - { - // Verify that implicit exceptions that deoptimize have a PcDesc and OopMap - ImmutableOopMapSet* oms = oop_maps(); - ImplicitExceptionTable implicit_table(this); - for (uint i = 0; i < implicit_table.len(); i++) { - int exec_offset = (int) implicit_table.get_exec_offset(i); - if (implicit_table.get_exec_offset(i) == implicit_table.get_cont_offset(i)) { - assert(pc_desc_at(code_begin() + exec_offset) != nullptr, "missing PcDesc"); - bool found = false; - for (int i = 0, imax = oms->count(); i < imax; i++) { - if (oms->pair_at(i)->pc_offset() == exec_offset) { - found = true; - break; - } - } - assert(found, "missing oopmap"); + { + // Verify that implicit exceptions that deoptimize have a PcDesc and OopMap + ImmutableOopMapSet* oms = oop_maps(); + ImplicitExceptionTable implicit_table(this); + for (uint i = 0; i < implicit_table.len(); i++) { + int exec_offset = (int) implicit_table.get_exec_offset(i); + if (implicit_table.get_exec_offset(i) == implicit_table.get_cont_offset(i)) { + assert(pc_desc_at(code_begin() + exec_offset) != nullptr, "missing PcDesc"); + bool found = false; + for (int i = 0, imax = oms->count(); i < imax; i++) { + if (oms->pair_at(i)->pc_offset() == exec_offset) { + found = true; + break; + } + } + assert(found, "missing oopmap"); + } } } - } #endif #endif + } VerifyOopsClosure voc(this); oops_do(&voc); @@ -2999,7 +3194,9 @@ void nmethod::verify() { assert(_oops_do_mark_link == nullptr, "_oops_do_mark_link for %s should be nullptr but is " PTR_FORMAT, nm->method()->external_name(), p2i(_oops_do_mark_link)); - verify_scopes(); + if (!is_scc() || SCCache::is_on()) { + verify_scopes(); + } CompiledICLocker nm_verify(this); VerifyMetadataClosure vmc; diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp index 16533f6958b..0b498b94f9d 100644 --- 
a/src/hotspot/share/code/nmethod.hpp +++ b/src/hotspot/share/code/nmethod.hpp @@ -45,7 +45,9 @@ class JvmtiThreadState; class MetadataClosure; class NativeCallWrapper; class OopIterateClosure; +class SCCReader; class SCCEntry; +class SCnmethod; class ScopeDesc; class xmlStream; @@ -305,6 +307,21 @@ class nmethod : public CodeBlob { // Post initialization void post_init(); + // For nmethods loaded from AOT code cache + nmethod(Method* method, + int nmethod_size, + AbstractCompiler* compiler, + int compile_id, + CompLevel comp_level, + int entry_bci, + bool preload, + GrowableArray<oop>& oop_list, + GrowableArray<Metadata*>& metadata_list, + GrowableArray<oop>& reloc_imm_oop_list, + GrowableArray<Metadata*>& reloc_imm_metadata_list, + SCCReader* scc_reader, + SCnmethod* scnm); + // For native wrappers nmethod(Method* method, CompilerType type, @@ -479,7 +496,22 @@ class nmethod : public CodeBlob { // transitions). void oops_do_set_strong_done(nmethod* old_head); + void record_nmethod_dependency(); public: + // create nmethod using data from AOT code cache + static nmethod* new_nmethod(const methodHandle& method, + AbstractCompiler* compiler, + int compile_id, + CompLevel comp_level, + int entry_bci, + bool preload, + GrowableArray<oop>& oop_list, + GrowableArray<Metadata*>& metadata_list, + GrowableArray<oop>& reloc_imm_oop_list, + GrowableArray<Metadata*>& reloc_imm_metadata_list, + SCCReader* scc_reader, + SCnmethod* scnm); + // create nmethod with entry_bci static nmethod* new_nmethod(const methodHandle& method, int compile_id, @@ -515,6 +547,7 @@ class nmethod : public CodeBlob { int exception_handler = -1); Method* method () const { return _method; } + uint16_t entry_bci () const { return _entry_bci; } bool is_native_method() const { return _method != nullptr && _method->is_native(); } bool is_java_method () const { return _method != nullptr && !_method->is_native(); } bool is_osr_method () const { return _entry_bci != InvocationEntryBci; } @@ -541,7 +574,7 @@ class nmethod : public CodeBlob { address stub_end () const { 
return data_begin() ; } address exception_begin () const { return header_begin() + _exception_offset ; } address deopt_handler_begin () const { return header_begin() + _deopt_handler_offset ; } - address deopt_mh_handler_begin() const { return header_begin() + _deopt_mh_handler_offset ; } + address deopt_mh_handler_begin() const { return _deopt_mh_handler_offset != -1 ? (header_begin() + _deopt_mh_handler_offset) : nullptr; } address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; } // mutable data @@ -557,6 +590,7 @@ class nmethod : public CodeBlob { #endif // immutable data + void set_immutable_data(address data) { _immutable_data = data; } address immutable_data_begin () const { return _immutable_data; } address immutable_data_end () const { return _immutable_data + _immutable_data_size ; } address dependencies_begin () const { return _immutable_data; } @@ -728,6 +762,7 @@ class nmethod : public CodeBlob { return &metadata_begin()[index - 1]; } + void copy_values(GrowableArray<oop>* array); void copy_values(GrowableArray<jobject>* oops); void copy_values(GrowableArray<Metadata*>* metadata); void copy_values(GrowableArray<address>
* metadata) {} // Nothing to do @@ -744,6 +779,8 @@ class nmethod : public CodeBlob { void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); } void fix_oop_relocations() { fix_oop_relocations(nullptr, nullptr, false); } + void create_reloc_immediates_list(GrowableArray<oop>& oop_list, GrowableArray<Metadata*>& metadata_list); + bool is_at_poll_return(address pc); bool is_at_poll_or_poll_return(address pc); diff --git a/src/hotspot/share/code/relocInfo.cpp b/src/hotspot/share/code/relocInfo.cpp index ec600f56078..fd8ae30096d 100644 --- a/src/hotspot/share/code/relocInfo.cpp +++ b/src/hotspot/share/code/relocInfo.cpp @@ -149,7 +149,6 @@ void RelocIterator::initialize(nmethod* nm, address begin, address limit) { set_limits(begin, limit); } - RelocIterator::RelocIterator(CodeSection* cs, address begin, address limit) { initialize_misc(); assert(((cs->locs_start() != nullptr) && (cs->locs_end() != nullptr)), "valid start and end pointer"); @@ -772,6 +771,14 @@ void internal_word_Relocation::fix_relocation_after_move(const CodeBuffer* src, set_value(target); } +void internal_word_Relocation::fix_relocation_after_aot_load(address orig_base_addr, address current_base_addr) { + address target = _target; + if (target == nullptr) { + target = this->target(); + target = current_base_addr + (target - orig_base_addr); + } + set_value(target); +} address internal_word_Relocation::target() { address target = _target; diff --git a/src/hotspot/share/code/relocInfo.hpp b/src/hotspot/share/code/relocInfo.hpp index 7919e9ee980..5bb1a093fb1 100644 --- a/src/hotspot/share/code/relocInfo.hpp +++ b/src/hotspot/share/code/relocInfo.hpp @@ -1397,6 +1397,8 @@ class internal_word_Relocation : public DataRelocation { void unpack_data() override; void fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) override; + void fix_relocation_after_aot_load(address orig_base_addr, address current_base_addr); + address target(); // if _target==nullptr, fetch addr 
from code stream int section() { return _section; } address value() override { return target(); } diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp index 48ff24b7a03..d5ea7719b09 100644 --- a/src/hotspot/share/compiler/compileBroker.cpp +++ b/src/hotspot/share/compiler/compileBroker.cpp @@ -2622,7 +2622,9 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) { whitebox_lock_compilation(); } if (StoreCachedCode && task->is_precompiled()) { - install_code = false; // not suitable in the current context + if (!UseNewCode2) { + install_code = false; // not suitable in the current context + } } comp->compile_method(&ci_env, target, osr_bci, install_code, directive); @@ -2638,7 +2640,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) { DirectivesStack::release(directive); - if (!ci_env.failing() && !task->is_success() && install_code) { + if (!ci_env.failing() && !task->is_success() && !task->is_precompiled()) { assert(ci_env.failure_reason() != nullptr, "expect failure reason"); assert(false, "compiler should always document failure: %s", ci_env.failure_reason()); // The compiler elected, without comment, not to register a result. 
diff --git a/src/hotspot/share/compiler/compileTask.cpp b/src/hotspot/share/compiler/compileTask.cpp index 7ce0f2479f6..e7d01762925 100644 --- a/src/hotspot/share/compiler/compileTask.cpp +++ b/src/hotspot/share/compiler/compileTask.cpp @@ -123,6 +123,8 @@ void CompileTask::initialize(int compile_id, _time_queued = 0; _time_started = 0; _time_finished = 0; + _aot_load_start = 0; + _aot_load_finish = 0; _compile_reason = compile_reason; _nm_content_size = 0; _nm_insts_size = 0; @@ -249,7 +251,8 @@ void CompileTask::print_impl(outputStream* st, Method* method, int compile_id, i bool is_osr_method, int osr_bci, bool is_blocking, bool is_scc, bool is_preload, const char* compiler_name, const char* msg, bool short_form, bool cr, - jlong time_created, jlong time_queued, jlong time_started, jlong time_finished) { + jlong time_created, jlong time_queued, jlong time_started, jlong time_finished, + jlong aot_load_start, jlong aot_load_finish) { if (!short_form) { { stringStream ss; @@ -277,6 +280,13 @@ void CompileTask::print_impl(outputStream* st, Method* method, int compile_id, i } st->print("%7s ", ss.freeze()); } + { // Time to load from AOT code cache + stringStream ss; + if (aot_load_start != 0 && aot_load_finish != 0) { + ss.print("A%.1f", TimeHelper::counter_to_millis(aot_load_finish - aot_load_start)); + } + st->print("%7s ", ss.freeze()); + } st->print(" "); } @@ -353,7 +363,7 @@ void CompileTask::print_inline_indent(int inline_level, outputStream* st) { void CompileTask::print(outputStream* st, const char* msg, bool short_form, bool cr) { bool is_osr_method = osr_bci() != InvocationEntryBci; print_impl(st, is_unloaded() ? 
nullptr : method(), compile_id(), comp_level(), is_osr_method, osr_bci(), is_blocking(), is_scc(), preload(), - compiler()->name(), msg, short_form, cr, _time_created, _time_queued, _time_started, _time_finished); + compiler()->name(), msg, short_form, cr, _time_created, _time_queued, _time_started, _time_finished, _aot_load_start, _aot_load_finish); } // ------------------------------------------------------------------ diff --git a/src/hotspot/share/compiler/compileTask.hpp b/src/hotspot/share/compiler/compileTask.hpp index 64b6d265e90..25f868e7108 100644 --- a/src/hotspot/share/compiler/compileTask.hpp +++ b/src/hotspot/share/compiler/compileTask.hpp @@ -125,6 +125,8 @@ class CompileTask : public CHeapObj<mtCompiler> { jlong _time_queued; // time when task was enqueued jlong _time_started; // time when compilation started jlong _time_finished; // time when compilation finished + jlong _aot_load_start; + jlong _aot_load_finish; Method* _hot_method; // which method actually triggered this task jobject _hot_method_holder; int _hot_count; // information about its invocation counter @@ -215,7 +217,8 @@ class CompileTask : public CHeapObj<mtCompiler> { void mark_queued(jlong time) { _time_queued = time; } void mark_started(jlong time) { _time_started = time; } void mark_finished(jlong time) { _time_finished = time; } - + void mark_aot_load_start(jlong time) { _aot_load_start = time; } + void mark_aot_load_finish(jlong time) { _aot_load_finish = time; } int comp_level() { return _comp_level;} void set_comp_level(int comp_level) { _comp_level = comp_level;} @@ -253,7 +256,8 @@ class CompileTask : public CHeapObj<mtCompiler> { bool is_scc = false, bool is_preload = false, const char* compiler_name = nullptr, const char* msg = nullptr, bool short_form = false, bool cr = true, - jlong time_created = 0, jlong time_queued = 0, jlong time_started = 0, jlong time_finished = 0); + jlong time_created = 0, jlong time_queued = 0, jlong time_started = 0, jlong time_finished = 0, + jlong aot_load_start = 0, jlong 
aot_load_finish = 0); public: void print(outputStream* st = tty, const char* msg = nullptr, bool short_form = false, bool cr = true);