From 946196583bcd4a01625c21dbdb4321344379881c Mon Sep 17 00:00:00 2001
From: zifeihan
Date: Tue, 24 Oct 2023 10:14:02 +0800
Subject: [PATCH] RISC-V: Remove dead code in interpreters

---
 src/hotspot/cpu/riscv/interp_masm_riscv.cpp   |  91 +-----
 src/hotspot/cpu/riscv/interp_masm_riscv.hpp   |   6 +-
 .../templateInterpreterGenerator_riscv.cpp    |   7 +-
 src/hotspot/cpu/riscv/templateTable_riscv.cpp | 298 ++++++++++--------
 src/hotspot/cpu/riscv/templateTable_riscv.hpp |   9 +-
 5 files changed, 191 insertions(+), 220 deletions(-)

diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
index 70c6758603a7c..e127c3dc3e6ac 100644
--- a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
+++ b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
@@ -38,6 +38,7 @@
 #include "oops/methodData.hpp"
 #include "oops/resolvedFieldEntry.hpp"
 #include "oops/resolvedIndyEntry.hpp"
+#include "oops/resolvedMethodEntry.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "prims/jvmtiThreadState.hpp"
 #include "runtime/basicLock.hpp"
@@ -229,71 +230,6 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
   }
 }

-// Return
-// Rindex: index into constant pool
-// Rcache: address of cache entry - ConstantPoolCache::base_offset()
-//
-// A caller must add ConstantPoolCache::base_offset() to Rcache to get
-// the true address of the cache entry.
-//
-void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
-                                                           Register index,
-                                                           int bcp_offset,
-                                                           size_t index_size) {
-  assert_different_registers(cache, index);
-  assert_different_registers(cache, xcpool);
-  // register "cache" is trashed in next shadd, so lets use it as a temporary register
-  get_cache_index_at_bcp(index, cache, bcp_offset, index_size);
-  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
-  // Convert from field index to ConstantPoolCacheEntry
-  // riscv already has the cache in xcpool so there is no need to
-  // install it in cache. Instead we pre-add the indexed offset to
-  // xcpool and return it in cache. All clients of this method need to
-  // be modified accordingly.
-  shadd(cache, index, xcpool, cache, 5);
-}
-
-
-void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
-                                                                        Register index,
-                                                                        Register bytecode,
-                                                                        int byte_no,
-                                                                        int bcp_offset,
-                                                                        size_t index_size) {
-  get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
-  // We use a 32-bit load here since the layout of 64-bit words on
-  // little-endian machines allow us that.
-  // n.b. unlike x86 cache already includes the index offset
-  la(bytecode, Address(cache,
-                       ConstantPoolCache::base_offset() +
-                       ConstantPoolCacheEntry::indices_offset()));
-  membar(MacroAssembler::AnyAny);
-  lwu(bytecode, bytecode);
-  membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
-  const int shift_count = (1 + byte_no) * BitsPerByte;
-  slli(bytecode, bytecode, XLEN - (shift_count + BitsPerByte));
-  srli(bytecode, bytecode, XLEN - BitsPerByte);
-}
-
-void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
-                                                               Register tmp,
-                                                               int bcp_offset,
-                                                               size_t index_size) {
-  assert_different_registers(cache, tmp);
-  // register "cache" is trashed in next ld, so lets use it as a temporary register
-  get_cache_index_at_bcp(tmp, cache, bcp_offset, index_size);
-  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
-  // Convert from field index to ConstantPoolCacheEntry index
-  // and from word offset to byte offset
-  assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord,
-         "else change next line");
-  ld(cache, Address(fp, frame::interpreter_frame_cache_offset * wordSize));
-  // skip past the header
-  add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
-  // construct pointer to cache entry
-  shadd(cache, tmp, cache, tmp, 2 + LogBytesPerWord);
-}
-
 // Load object from cpool->resolved_references(index)
 void InterpreterMacroAssembler::load_resolved_reference_at_index(
   Register result, Register index, Register tmp) {
@@ -319,18 +255,6 @@ void InterpreterMacroAssembler::load_resolved_klass_at_offset(
   ld(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
 }

-void InterpreterMacroAssembler::load_resolved_method_at_index(int byte_no,
-                                                              Register method,
-                                                              Register cache) {
-  const int method_offset = in_bytes(
-    ConstantPoolCache::base_offset() +
-      ((byte_no == TemplateTable::f2_byte)
-       ? ConstantPoolCacheEntry::f2_offset()
-       : ConstantPoolCacheEntry::f1_offset()));
-
-  ld(method, Address(cache, method_offset)); // get f1 Method*
-}
-
 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
 // subtype of super_klass.
 //
@@ -1992,6 +1916,19 @@ void InterpreterMacroAssembler::get_method_counters(Register method,
   bind(has_counters);
 }

+void InterpreterMacroAssembler::load_method_entry(Register cache, Register index, int bcp_offset) {
+  // Get index out of bytecode pointer
+  get_cache_index_at_bcp(index, cache, bcp_offset, sizeof(u2));
+  mv(cache, sizeof(ResolvedMethodEntry));
+  mul(index, index, cache); // Scale the index to be the entry index * sizeof(ResolvedMethodEntry)
+
+  // Get address of method entries array
+  ld(cache, Address(xcpool, ConstantPoolCache::method_entries_offset()));
+  add(cache, cache, Array<ResolvedMethodEntry>::base_offset_in_bytes());
+  add(cache, cache, index);
+  la(cache, Address(cache, 0));
+}
+
 #ifdef ASSERT
 void InterpreterMacroAssembler::verify_access_flags(Register access_flags, uint32_t flag,
                                                     const char* msg, bool stop_by_hit) {
diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.hpp b/src/hotspot/cpu/riscv/interp_masm_riscv.hpp
index 8f447cdba43ce..d3959d68d9cfb 100644
--- a/src/hotspot/cpu/riscv/interp_masm_riscv.hpp
+++ b/src/hotspot/cpu/riscv/interp_masm_riscv.hpp
@@ -136,9 +136,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
   }
   void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
-  void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
-  void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
-  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
   void get_cache_index_at_bcp(Register index, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));

   void get_method_counters(Register method, Register mcs, Label& skip);
@@ -148,8 +145,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
   // Load cpool->resolved_klass_at(index).
void load_resolved_klass_at_offset(Register cpool, Register index, Register klass, Register temp); - void load_resolved_method_at_index(int byte_no, Register method, Register cache); - void pop_ptr(Register r = x10); void pop_i(Register r = x10); void pop_l(Register r = x10); @@ -302,6 +297,7 @@ class InterpreterMacroAssembler: public MacroAssembler { void load_resolved_indy_entry(Register cache, Register index); void load_field_entry(Register cache, Register index, int bcp_offset = 1); + void load_method_entry(Register cache, Register index, int bcp_offset = 1); #ifdef ASSERT void verify_access_flags(Register access_flags, uint32_t flag, diff --git a/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp b/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp index 7402cb4a54ad8..860a7afb5e332 100644 --- a/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp +++ b/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp @@ -41,6 +41,7 @@ #include "oops/methodData.hpp" #include "oops/oop.inline.hpp" #include "oops/resolvedIndyEntry.hpp" +#include "oops/resolvedMethodEntry.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" #include "runtime/arguments.hpp" @@ -454,9 +455,9 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, __ shadd(esp, cache, esp, t0, 3); } else { // Pop N words from the stack - __ get_cache_and_index_at_bcp(cache, index, 1, index_size); - __ ld(cache, Address(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); - __ andi(cache, cache, ConstantPoolCacheEntry::parameter_size_mask); + assert(index_size == sizeof(u2), "Can only be u2"); + __ load_method_entry(cache, index); + __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset()))); __ shadd(esp, cache, esp, t0, 3); } diff --git a/src/hotspot/cpu/riscv/templateTable_riscv.cpp b/src/hotspot/cpu/riscv/templateTable_riscv.cpp index a0b3f037ab2a0..5b6abf37f80a5 100644 --- a/src/hotspot/cpu/riscv/templateTable_riscv.cpp +++ b/src/hotspot/cpu/riscv/templateTable_riscv.cpp @@ -40,6 +40,7 @@ #include "oops/oop.inline.hpp" #include "oops/resolvedFieldEntry.hpp" #include "oops/resolvedIndyEntry.hpp" +#include "oops/resolvedMethodEntry.hpp" #include "prims/jvmtiExport.hpp" #include "prims/methodHandles.hpp" #include "runtime/frame.inline.hpp" @@ -471,14 +472,14 @@ void TemplateTable::condy_helper(Label& Done) { // VMr = obj = base address to find primitive value to push // VMr2 = flags = (tos, off) using format of CPCE::_flags __ mv(off, flags); - __ mv(t0, ConstantPoolCacheEntry::field_index_mask); + __ mv(t0, ConstantPoolCache::field_index_mask); __ andrw(off, off, t0); __ add(off, obj, off); const Address field(off, 0); // base + R---->base + offset - __ slli(flags, flags, XLEN - (ConstantPoolCacheEntry::tos_state_shift + ConstantPoolCacheEntry::tos_state_bits)); - __ srli(flags, flags, XLEN - ConstantPoolCacheEntry::tos_state_bits); // (1 << 5) - 4 --> 28~31==> flags:0~3 + __ slli(flags, flags, XLEN - (ConstantPoolCache::tos_state_shift + ConstantPoolCache::tos_state_bits)); + __ srli(flags, flags, XLEN - ConstantPoolCache::tos_state_bits); // (1 << 5) - 4 --> 28~31==> flags:0~3 switch (bytecode()) { case Bytecodes::_ldc: // fall through @@ -2168,19 +2169,32 @@ void TemplateTable::_return(TosState state) { // volatile-stores although it could just as well go before // volatile-loads. 
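+// Resolution sketch (illustrative pseudo-code only, not the generated code;
+// assumes the ResolvedMethodEntry accessors declared in resolvedMethodEntry.hpp):
+//   ResolvedMethodEntry* entry = cpcache->resolved_method_entry_at(index);
+//   u1 resolved = (byte_no == f1_byte) ? entry->bytecode1() : entry->bytecode2();
+//   if (resolved != (u1)bytecode()) {
+//     // call into InterpreterRuntime to resolve; it store-releases the bytecode,
+//     // which pairs with the load-acquire sequence below
+//   }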
-void TemplateTable::resolve_cache_and_index(int byte_no,
-                                            Register Rcache,
-                                            Register index,
-                                            size_t index_size) {
+void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
+                                                       Register Rcache,
+                                                       Register index) {
   const Register temp = x9;
   assert_different_registers(Rcache, index, temp);
+  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
   Label resolved, clinit_barrier_slow;

   Bytecodes::Code code = bytecode();
+  __ load_method_entry(Rcache, index);
+  switch (byte_no) {
+    case f1_byte:
+      __ add(temp, Rcache, in_bytes(ResolvedMethodEntry::bytecode1_offset()));
+      __ la(temp, Address(temp, 0));
+      break;
+    case f2_byte:
+      __ add(temp, Rcache, in_bytes(ResolvedMethodEntry::bytecode2_offset()));
+      __ la(temp, Address(temp, 0));
+      break;
+  }
+  // Load-acquire the bytecode to match store-release in InterpreterRuntime
+  __ membar(MacroAssembler::AnyAny);
+  __ lbu(temp, Address(temp, 0));
+  __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);

-  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
-  __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
   __ mv(t0, (int) code);
   __ beq(temp, t0, resolved);
@@ -2193,14 +2207,14 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
   __ call_VM(noreg, entry, temp);

   // Update registers with resolved info
-  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
+  __ load_method_entry(Rcache, index);
   // n.b. unlike x86 Rcache is now rcpool plus the indexed offset
   // so all clients of this method must be modified accordingly
   __ bind(resolved);

   // Class initialization barrier for static methods
   if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
-    __ load_resolved_method_at_index(byte_no, temp, Rcache);
+    __ ld(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
     __ load_method_holder(temp, temp);
     __ clinit_barrier(temp, t0, nullptr, &clinit_barrier_slow);
   }
@@ -2271,32 +2285,111 @@ void TemplateTable::load_resolved_field_entry(Register obj,
   }
 }

-// The Rcache and index registers must be set before call
-// n.b unlike x86 cache already includes the index offset
-void TemplateTable::load_field_cp_cache_entry(Register obj,
-                                              Register cache,
-                                              Register index,
-                                              Register off,
-                                              Register flags,
-                                              bool is_static = false) {
-  assert_different_registers(cache, index, flags, off);
+void TemplateTable::load_resolved_method_entry_special_or_static(Register cache,
+                                                                 Register method,
+                                                                 Register flags) {

-  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
-  // Field offset
-  __ ld(off, Address(cache, in_bytes(cp_base_offset +
-                                     ConstantPoolCacheEntry::f2_offset())));
-  // Flags
-  __ lwu(flags, Address(cache, in_bytes(cp_base_offset +
-                                        ConstantPoolCacheEntry::flags_offset())));
+  // setup registers
+  const Register index = x14;
+  assert_different_registers(method, flags);
+  assert_different_registers(method, cache, index);

-  // klass overwrite register
-  if (is_static) {
-    __ ld(obj, Address(cache, in_bytes(cp_base_offset +
-                                       ConstantPoolCacheEntry::f1_offset())));
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    __ ld(obj, Address(obj, mirror_offset));
-    __ resolve_oop_handle(obj, x15, t1);
-  }
+  // determine constant pool cache field offsets
+  resolve_cache_and_index_for_method(f1_byte, cache, index);
+  __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
+  __ ld(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
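+
+  // At this point cache = ResolvedMethodEntry*, method = Method*, and flags
+  // holds the entry's flag byte; note the TosState lives in a separate byte
+  // of the entry and is read via ResolvedMethodEntry::type_offset() in
+  // prepare_invoke.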
+} + +void TemplateTable::load_resolved_method_entry_handle(Register cache, + Register method, + Register ref_index, + Register flags) { + // setup registers + const Register index = x14; + assert_different_registers(method, flags); + assert_different_registers(method, cache, index); + + // determine constant pool cache field offsets + resolve_cache_and_index_for_method(f1_byte, cache, index); + __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset()))); + + // invokehandle uses an index into the resolved references array + __ ld(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset()))); + __ load_unsigned_short(ref_index, Address(cache, in_bytes(ResolvedMethodEntry::resolved_references_index_offset()))); + + // maybe push appendix to arguments (just before return address) + Label L_no_push; + __ test_bit(t0, flags, ResolvedMethodEntry::has_appendix_shift); + __ beqz(t0, L_no_push); + // Push the appendix as a trailing parameter. + // This must be done before we get the receiver, + // since the parameter_size includes it. + __ push_reg(x9); + __ mv(x9, ref_index); + __ load_resolved_reference_at_index(ref_index, x9); + __ pop_reg(x9); + __ push_reg(ref_index); // push appendix (MethodType, CallSite, etc.) + __ bind(L_no_push); +} + +void TemplateTable::load_resolved_method_entry_interface(Register cache, + Register klass, + Register method_or_table_index, + Register flags) { + // setup registers + const Register index = x14; + assert_different_registers(method_or_table_index, cache, flags); + + // determine constant pool cache field offsets + resolve_cache_and_index_for_method(f1_byte, cache, index); + __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset()))); + + // Invokeinterface can behave in different ways: + // If calling a method from java.lang.Object, the forced virtual flag is true so the invocation will + // behave like an invokevirtual call. The state of the virtual final flag will determine whether a method or + // vtable index is placed in the register. + // Otherwise, the registers will be populated with the klass and method. 
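+  //
+  // In pseudo-code (illustrative only, assuming accessors that mirror the
+  // flag-bit tests below; this is not the generated code):
+  //   if (entry->is_forced_virtual()) {
+  //     method_or_table_index = entry->is_vfinal() ? entry->method()
+  //                                                : entry->table_index();  // vtable index
+  //   } else {
+  //     method_or_table_index = entry->method();
+  //     klass                 = entry->klass();  // needed for itable dispatch
+  //   }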
+
+  Label NotVirtual; Label NotVFinal; Label Done;
+  __ test_bit(t0, flags, ResolvedMethodEntry::is_forced_virtual_shift);
+  __ beqz(t0, NotVirtual);
+  __ test_bit(t0, flags, ResolvedMethodEntry::is_vfinal_shift);
+  __ beqz(t0, NotVFinal);
+  __ ld(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
+  __ j(Done);
+
+  __ bind(NotVFinal);
+  __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
+  __ j(Done);
+
+  __ bind(NotVirtual);
+  __ ld(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
+  __ ld(klass, Address(cache, in_bytes(ResolvedMethodEntry::klass_offset())));
+  __ bind(Done);
+}
+
+void TemplateTable::load_resolved_method_entry_virtual(Register cache,
+                                                       Register method_or_table_index,
+                                                       Register flags) {
+  // setup registers
+  const Register index = x14;
+  assert_different_registers(method_or_table_index, flags);
+  assert_different_registers(method_or_table_index, cache, index);
+
+  // determine constant pool cache field offsets
+  resolve_cache_and_index_for_method(f2_byte, cache, index);
+  __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
+
+  // method_or_table_index can either be a vtable index or a method depending on the virtual final flag
+  Label NotVFinal; Label Done;
+  __ test_bit(t0, flags, ResolvedMethodEntry::is_vfinal_shift);
+  __ beqz(t0, NotVFinal);
+  __ ld(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
+  __ j(Done);
+
+  __ bind(NotVFinal);
+  __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
+  __ bind(Done);
+}

 // The xmethod register is input and overwritten to be the adapter method for the
@@ -2369,42 +2462,6 @@ void TemplateTable::load_invokedynamic_entry(Register method) {
   }
 }

-void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
-                                               Register method,
-                                               Register itable_index,
-                                               Register flags,
-                                               bool is_invokevirtual,
-                                               bool is_invokevfinal, /*unused*/
-                                               bool is_invokedynamic /*unused*/) {
-  // setup registers
-  const Register cache = t1;
-  const Register index = x14;
-  assert_different_registers(method, flags);
-  assert_different_registers(method, cache, index);
-  assert_different_registers(itable_index, flags);
-  assert_different_registers(itable_index, cache, index);
-  // determine constant pool cache field offsets
-  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
-  const int method_offset = in_bytes(ConstantPoolCache::base_offset() +
-                                     (is_invokevirtual ?
-                                      ConstantPoolCacheEntry::f2_offset() :
-                                      ConstantPoolCacheEntry::f1_offset()));
-  const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
-                                    ConstantPoolCacheEntry::flags_offset());
-  // access constant pool cache fields
-  const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
-                                    ConstantPoolCacheEntry::f2_offset());
-
-  size_t index_size = sizeof(u2);
-  resolve_cache_and_index(byte_no, cache, index, index_size);
-  __ ld(method, Address(cache, method_offset));
-
-  if (itable_index != noreg) {
-    __ ld(itable_index, Address(cache, index_offset));
-  }
-  __ lwu(flags, Address(cache, flags_offset));
-}
-
 // The registers cache and index expected to be set before call.
 // Correct values of the cache and index registers are preserved.
void TemplateTable::jvmti_post_field_access(Register cache, Register index, @@ -3181,67 +3238,32 @@ void TemplateTable::fast_xaccess(TosState state) { //----------------------------------------------------------------------------- // Calls -void TemplateTable::prepare_invoke(int byte_no, - Register method, // linked method (or i-klass) - Register index, // itable index, MethodType, etc. - Register recv, // if caller wants to see it +void TemplateTable::prepare_invoke(Register recv, // if caller wants to see it Register flags // if caller wants to test it ) { - // determine flags - const Bytecodes::Code code = bytecode(); - const bool is_invokeinterface = code == Bytecodes::_invokeinterface; - const bool is_invokedynamic = code == Bytecodes::_invokedynamic; - const bool is_invokehandle = code == Bytecodes::_invokehandle; - const bool is_invokevirtual = code == Bytecodes::_invokevirtual; - const bool is_invokespecial = code == Bytecodes::_invokespecial; - const bool load_receiver = (recv != noreg); - const bool save_flags = (flags != noreg); - assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); - assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal"); - assert(flags == noreg || flags == x13, ""); - assert(recv == noreg || recv == x12, ""); - - // setup registers & access constant pool cache - if (recv == noreg) { - recv = x12; - } - if (flags == noreg) { - flags = x13; - } - assert_different_registers(method, index, recv, flags); + + const Register cache = x12; + Bytecodes::Code code = bytecode(); + const bool load_receiver = (code != Bytecodes::_invokestatic) && (code != Bytecodes::_invokedynamic); + assert_different_registers(recv, flags); // save 'interpreter return address' __ save_bcp(); - load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); - - // maybe push appendix to arguments (just before return address) - if (is_invokehandle) { - Label L_no_push; - __ test_bit(t0, flags, ConstantPoolCacheEntry::has_appendix_shift); - __ beqz(t0, L_no_push); - // Push the appendix as a trailing parameter. - // This must be done before we get the receiver, - // since the parameter_size includes it. - __ push_reg(x9); - __ mv(x9, index); - __ load_resolved_reference_at_index(index, x9); - __ pop_reg(x9); - __ push_reg(index); // push appendix (MethodType, CallSite, etc.) - __ bind(L_no_push); - } + // Load TOS state for later + __ load_unsigned_byte(t1, Address(cache, in_bytes(ResolvedMethodEntry::type_offset()))); // load receiver if needed (note: no return address pushed yet) if (load_receiver) { - __ andi(recv, flags, ConstantPoolCacheEntry::parameter_size_mask); // parameter_size_mask = 1 << 8 + __ load_unsigned_short(recv, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset()))); __ shadd(t0, recv, esp, t0, 3); __ ld(recv, Address(t0, -Interpreter::expr_offset_in_bytes(1))); __ verify_oop(recv); } // compute return type - __ slli(t1, flags, XLEN - (ConstantPoolCacheEntry::tos_state_shift + ConstantPoolCacheEntry::tos_state_bits)); - __ srli(t1, t1, XLEN - ConstantPoolCacheEntry::tos_state_bits); // (1 << 5) - 4 --> 28~31==> t1:0~3 + // x86 uses a shift and mask or wings it with a shift plus assert + // the mask is not needed. 
aarch64 just uses bitfield extract

   // load return address
   {
@@ -3259,7 +3281,7 @@ void TemplateTable::invokevirtual_helper(Register index,
   assert_different_registers(index, recv, x10, x13);
   // Test for an invoke of a final method
   Label notFinal;
-  __ test_bit(t0, flags, ConstantPoolCacheEntry::is_vfinal_shift);
+  __ test_bit(t0, flags, ResolvedMethodEntry::is_vfinal_shift);
   __ beqz(t0, notFinal);

   const Register method = index;  // method must be xmethod
@@ -3295,7 +3317,10 @@ void TemplateTable::invokevirtual(int byte_no) {
   transition(vtos, vtos);
   assert(byte_no == f2_byte, "use this argument");

-  prepare_invoke(byte_no, xmethod, noreg, x12, x13);
+  load_resolved_method_entry_virtual(x12,      // ResolvedMethodEntry*
+                                     xmethod,  // Method* or vtable index
+                                     x13);     // flags
+  prepare_invoke(x12, x13);  // recv, flags

   // xmethod: index (actually a Method*)
   // x12: receiver
@@ -3308,8 +3333,11 @@ void TemplateTable::invokespecial(int byte_no) {
   transition(vtos, vtos);
   assert(byte_no == f1_byte, "use this argument");

-  prepare_invoke(byte_no, xmethod, noreg,  // get f1 Method*
-                 x12);  // get receiver also for null check
+  load_resolved_method_entry_special_or_static(x12,      // ResolvedMethodEntry*
+                                               xmethod,  // Method*
+                                               x13);     // flags
+  prepare_invoke(x12, x13);  // get receiver also for null check and flags
+
   __ verify_oop(x12);
   __ null_check(x12);
   // do the call
@@ -3322,7 +3350,11 @@ void TemplateTable::invokestatic(int byte_no) {
   transition(vtos, vtos);
   assert(byte_no == f1_byte, "use this argument");

-  prepare_invoke(byte_no, xmethod);  // get f1 Method*
+  load_resolved_method_entry_special_or_static(x12,      // ResolvedMethodEntry*
+                                               xmethod,  // Method*
+                                               x13);     // flags
+  prepare_invoke(x12, x13);  // flags only; invokestatic loads no receiver
+
   // do the call
   __ profile_call(x10);
   __ profile_arguments_type(x10, xmethod, x14, false);
@@ -3337,8 +3369,11 @@ void TemplateTable::invokeinterface(int byte_no) {
   transition(vtos, vtos);
   assert(byte_no == f1_byte, "use this argument");

-  prepare_invoke(byte_no, x10, xmethod,  // get f1 Klass*, f2 Method*
-                 x12, x13);  // recv, flags
+  load_resolved_method_entry_interface(x12,      // ResolvedMethodEntry*
+                                       x10,      // Klass*
+                                       xmethod,  // Method* or itable/vtable index
+                                       x13);     // flags
+  prepare_invoke(x12, x13);  // receiver and flags

   // x10: interface klass (from f1)
   // xmethod: method (from f2)
@@ -3351,7 +3386,7 @@

   // Special case of invokeinterface called for virtual method of
   // java.lang.Object.
See cpCache.cpp for details Label notObjectMethod; - __ test_bit(t0, x13, ConstantPoolCacheEntry::is_forced_virtual_shift); + __ test_bit(t0, x13, ResolvedMethodEntry::is_forced_virtual_shift); __ beqz(t0, notObjectMethod); invokevirtual_helper(xmethod, x12, x13); @@ -3361,7 +3396,7 @@ void TemplateTable::invokeinterface(int byte_no) { // Check for private method invocation - indicated by vfinal Label notVFinal; - __ test_bit(t0, x13, ConstantPoolCacheEntry::is_vfinal_shift); + __ test_bit(t0, x13, ResolvedMethodEntry::is_vfinal_shift); __ beqz(t0, notVFinal); // Check receiver klass into x13 @@ -3458,7 +3493,12 @@ void TemplateTable::invokehandle(int byte_no) { transition(vtos, vtos); assert(byte_no == f1_byte, "use this argument"); - prepare_invoke(byte_no, xmethod, x10, x12); + load_resolved_method_entry_handle(x12, // ResolvedMethodEntry* + xmethod, // Method* + x10, // Resolved reference + x13); // flags + prepare_invoke(x12, x13); + __ verify_method_ptr(x12); __ verify_oop(x12); __ null_check(x12); @@ -3480,9 +3520,9 @@ void TemplateTable::invokedynamic(int byte_no) { load_invokedynamic_entry(xmethod); // x10: CallSite object (from cpool->resolved_references[]) - // xmethod: MH.linkToCallSite method (from f2) + // xmethod: MH.linkToCallSite method - // Note: x10_callsite is already pushed by prepare_invoke + // Note: x10_callsite is already pushed // %%% should make a type profile for any invokedynamic that takes a ref argument // profile this call diff --git a/src/hotspot/cpu/riscv/templateTable_riscv.hpp b/src/hotspot/cpu/riscv/templateTable_riscv.hpp index fcc86108d2839..33603dec86c62 100644 --- a/src/hotspot/cpu/riscv/templateTable_riscv.hpp +++ b/src/hotspot/cpu/riscv/templateTable_riscv.hpp @@ -27,12 +27,9 @@ #ifndef CPU_RISCV_TEMPLATETABLE_RISCV_HPP #define CPU_RISCV_TEMPLATETABLE_RISCV_HPP -static void prepare_invoke(int byte_no, - Register method, // linked method (or i-klass) - Register index = noreg, // itable index, MethodType, etc. - Register recv = noreg, // if caller wants to see it - Register flags = noreg // if caller wants to test it - ); +static void prepare_invoke(Register recv, // if caller wants to see it + Register flags // if caller wants to test it + ); static void invokevirtual_helper(Register index, Register recv, Register flags);