Path: blob/master/src/hotspot/cpu/x86/interp_masm_x86.cpp
/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compiler_globals.hpp"
#include "interp_masm_x86.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/powerOfTwo.hpp"

// Implementation of InterpreterMacroAssembler

void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  jump(RuntimeAddress(entry));
}

void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  Label update, next, none;

  interp_verify_oop(obj, atos);

  testptr(obj, obj);
  jccb(Assembler::notZero, update);
  orptr(mdo_addr, TypeEntries::null_seen);
  jmpb(next);

  bind(update);
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
  load_klass(obj, obj, tmp_load_klass);

  xorptr(obj, mdo_addr);
  testptr(obj, TypeEntries::type_klass_mask);
  jccb(Assembler::zero, next); // klass seen before, nothing to
                               // do. The unknown bit may have been
                               // set already but no need to check.

  testptr(obj, TypeEntries::type_unknown);
  jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

  cmpptr(mdo_addr, 0);
  jccb(Assembler::equal, none);
  cmpptr(mdo_addr, TypeEntries::null_seen);
  jccb(Assembler::equal, none);
  // There is a chance that the checks above (re-reading profiling
  // data from memory) fail if another thread has just set the
  // profiling to this obj's klass
  xorptr(obj, mdo_addr);
  testptr(obj, TypeEntries::type_klass_mask);
  jccb(Assembler::zero, next);

  // different than before. Cannot keep accurate profile.
  orptr(mdo_addr, TypeEntries::type_unknown);
  jmpb(next);

  bind(none);
  // first time here. Set profile type.
  movptr(mdo_addr, obj);

  bind(next);
}
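
// Worked example of the xor/test idiom in profile_obj_type above, assuming a
// klass pointer K is already recorded in the profile word P, possibly with
// low flag bits set (all flags live below TypeEntries::type_klass_mask):
// obj ^ P leaves only the flag bits when obj's klass equals K, so
// (obj ^ P) & TypeEntries::type_klass_mask == 0 means the same klass was
// seen before and there is nothing to do. A different klass K' leaves the
// bits of K ^ K' inside the mask, and the code falls through to the
// unknown/unset checks.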

void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
  if (!ProfileInterpreter) {
    return;
  }

  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(mdp, profile_continue);

    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());

    cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
    jcc(Assembler::notEqual, profile_continue);

    if (MethodData::profile_arguments()) {
      Label done;
      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
      addptr(mdp, off_to_args);

      for (int i = 0; i < TypeProfileArgsLimit; i++) {
        if (i > 0 || MethodData::profile_return()) {
          // If return value type is profiled we may have no argument to profile
          movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
          subl(tmp, i*TypeStackSlotEntries::per_arg_count());
          cmpl(tmp, TypeStackSlotEntries::per_arg_count());
          jcc(Assembler::less, done);
        }
        movptr(tmp, Address(callee, Method::const_offset()));
        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
        // stack offset o (zero based) from the start of the argument
        // list, for n arguments translates into offset n - o - 1 from
        // the end of the argument list
        subptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
        subl(tmp, 1);
        Address arg_addr = argument_address(tmp);
        movptr(tmp, arg_addr);

        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
        profile_obj_type(tmp, mdo_arg_addr);

        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
        addptr(mdp, to_add);
        off_to_args += to_add;
      }

      if (MethodData::profile_return()) {
        movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
        subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
      }

      bind(done);

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. tmp is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end. Non-null
        // if there's a return to profile.
        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        shll(tmp, log2i_exact((int)DataLayout::cell_size));
        addptr(mdp, tmp);
      }
      movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp);
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
    }

    // mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there's one

    bind(profile_continue);
  }
}
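
// Worked example of the slot arithmetic in profile_arguments_type above:
// TypeEntriesAtCall records the stack slot o of each profiled argument
// counted from the start of the argument list, while argument_address()
// indexes from the expression-stack end, so with n = 3 parameters the first
// argument (o = 0) becomes index n - o - 1 = 2 and the last argument
// (o = 2) becomes index 0, the slot nearest the top of stack.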

void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
  assert_different_registers(mdp, ret, tmp, _bcp_register);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(mdp, profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");

      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile. We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length.
      Label do_profile;
      cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
      jcc(Assembler::equal, do_profile);
      cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
      jcc(Assembler::equal, do_profile);
      get_method(tmp);
      cmpw(Address(tmp, Method::intrinsic_id_offset_in_bytes()), static_cast<int>(vmIntrinsics::_compiledLambdaForm));
      jcc(Assembler::notEqual, profile_continue);

      bind(do_profile);
    }

    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
    mov(tmp, ret);
    profile_obj_type(tmp, mdo_ret_addr);

    bind(profile_continue);
  }
}
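
// A note on the negative displacement in mdo_ret_addr above: by the time
// profile_return_type runs, mdp has already been advanced past the whole
// CallTypeData/VirtualCallTypeData (see profile_arguments_type), so the
// ReturnTypeEntry cells sit immediately below mdp and are reached by
// subtracting ReturnTypeEntry::size().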

void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue;

    test_method_data_pointer(mdp, profile_continue);

    // Load the offset of the area within the MDO used for
    // parameters. If it's negative we're not profiling any parameters
    movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
    testl(tmp1, tmp1);
    jcc(Assembler::negative, profile_continue);

    // Compute a pointer to the area for parameters from the offset
    // and move the pointer to the slot for the last
    // parameter. Collect profiling from the last parameter down.
    // mdo start + parameters offset + array length - 1
    addptr(mdp, tmp1);
    movptr(tmp1, Address(mdp, ArrayData::array_len_offset()));
    decrement(tmp1, TypeStackSlotEntries::per_arg_count());

    Label loop;
    bind(loop);

    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
    int type_base = in_bytes(ParametersTypeData::type_offset(0));
    Address::ScaleFactor per_arg_scale = Address::times(DataLayout::cell_size);
    Address arg_off(mdp, tmp1, per_arg_scale, off_base);
    Address arg_type(mdp, tmp1, per_arg_scale, type_base);

    // load offset on the stack from the slot for this parameter
    movptr(tmp2, arg_off);
    negptr(tmp2);
    // read the parameter from the local area
    movptr(tmp2, Address(_locals_register, tmp2, Interpreter::stackElementScale()));

    // profile the parameter
    profile_obj_type(tmp2, arg_type);

    // go to next parameter
    decrement(tmp1, TypeStackSlotEntries::per_arg_count());
    jcc(Assembler::positive, loop);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
                                                  int number_of_arguments) {
  // interpreter specific
  //
  // Note: No need to save/restore bcp & locals registers
  //       since these are callee saved registers and no blocking/
  //       GC can happen in leaf calls.
  // Further Note: DO NOT save/restore bcp/locals. If a caller has
  // already saved them so that it can use rsi/rdi as temporaries
  // then a save/restore here will DESTROY the copy the caller
  // saved! There used to be a save_bcp() that only happened in
  // the ASSERT path (no restore_bcp), which caused bizarre failures
  // when the JVM was built with ASSERTs.
#ifdef ASSERT
  {
    Label L;
    cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    stop("InterpreterMacroAssembler::call_VM_leaf_base:"
         " last_sp != NULL");
    bind(L);
  }
#endif
  // super call
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
  // interpreter specific
  // LP64: Used to ASSERT that r13/r14 were equal to frame's bcp/locals
  // but since they may not have been saved (and we don't want to
  // save them here, see note above) the assert is invalid.
}

void InterpreterMacroAssembler::call_VM_base(Register oop_result,
                                             Register java_thread,
                                             Register last_java_sp,
                                             address entry_point,
                                             int number_of_arguments,
                                             bool check_exceptions) {
  // interpreter specific
  //
  // Note: Could avoid restoring locals ptr (callee saved) - however doesn't
  //       really make a difference for these runtime calls, since they are
  //       slow anyway. Btw., bcp must be saved/restored since it may change
  //       due to GC.
  NOT_LP64(assert(java_thread == noreg, "not expecting a precomputed java thread");)
  save_bcp();
#ifdef ASSERT
  {
    Label L;
    cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    stop("InterpreterMacroAssembler::call_VM_base:"
         " last_sp != NULL");
    bind(L);
  }
#endif /* ASSERT */
  // super call
  MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
                               entry_point, number_of_arguments,
                               check_exceptions);
  // interpreter specific
  restore_bcp();
  restore_locals();
}

void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;
    // Initiate popframe handling only if it is not already being
    // processed. If the flag has the popframe_processing bit set, it
    // means that this code is called *during* popframe handling - we
    // don't want to reenter.
    // This method is only called just after the call into the vm in
    // call_VM_base, so the arg registers are available.
    Register pop_cond = NOT_LP64(java_thread) // Not clear if any other register is available on 32 bit
                        LP64_ONLY(c_rarg0);
    movl(pop_cond, Address(java_thread, JavaThread::popframe_condition_offset()));
    testl(pop_cond, JavaThread::popframe_pending_bit);
    jcc(Assembler::zero, L);
    testl(pop_cond, JavaThread::popframe_processing_bit);
    jcc(Assembler::notZero, L);
    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
    jmp(rax);
    bind(L);
    NOT_LP64(get_thread(java_thread);)
  }
}

void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  NOT_LP64(get_thread(thread);)
  movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(rcx, JvmtiThreadState::earlyret_value_offset());
#ifdef _LP64
  switch (state) {
    case atos: movptr(rax, oop_addr);
               movptr(oop_addr, (int32_t)NULL_WORD);
               interp_verify_oop(rax, state); break;
    case ltos: movptr(rax, val_addr);         break;
    case btos:                                // fall through
    case ztos:                                // fall through
    case ctos:                                // fall through
    case stos:                                // fall through
    case itos: movl(rax, val_addr);           break;
    case ftos: load_float(val_addr);          break;
    case dtos: load_double(val_addr);         break;
    case vtos: /* nothing to do */            break;
    default  : ShouldNotReachHere();
  }
  // Clean up tos value in the thread object
  movl(tos_addr, (int) ilgl);
  movl(val_addr, (int32_t) NULL_WORD);
#else
  const Address val_addr1(rcx, JvmtiThreadState::earlyret_value_offset()
                               + in_ByteSize(wordSize));
  switch (state) {
    case atos: movptr(rax, oop_addr);
               movptr(oop_addr, NULL_WORD);
               interp_verify_oop(rax, state); break;
    case ltos:
               movl(rdx, val_addr1);          // fall through
    case btos:                                // fall through
    case ztos:                                // fall through
    case ctos:                                // fall through
    case stos:                                // fall through
    case itos: movl(rax, val_addr);           break;
    case ftos: load_float(val_addr);          break;
    case dtos: load_double(val_addr);         break;
    case vtos: /* nothing to do */            break;
    default  : ShouldNotReachHere();
  }
#endif // _LP64
  // Clean up tos value in the thread object
  movl(tos_addr, (int32_t) ilgl);
  movptr(val_addr, NULL_WORD);
  NOT_LP64(movptr(val_addr1, NULL_WORD);)
}


void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    Register tmp = LP64_ONLY(c_rarg0) NOT_LP64(java_thread);
    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(java_thread);

    movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset()));
    testptr(tmp, tmp);
    jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    movl(tmp, Address(tmp, JvmtiThreadState::earlyret_state_offset()));
    cmpl(tmp, JvmtiThreadState::earlyret_pending);
    jcc(Assembler::notEqual, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code.
    NOT_LP64(get_thread(java_thread);)
    movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset()));
#ifdef _LP64
    movl(tmp, Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), tmp);
#else
    pushl(Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), 1);
#endif // _LP64
    jmp(rax);
    bind(L);
    NOT_LP64(get_thread(java_thread);)
  }
}

void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
  assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
  load_unsigned_short(reg, Address(_bcp_register, bcp_offset));
  bswapl(reg);
  shrl(reg, 16);
}
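
// Worked example of the byte-order fix-up above: for bytecode stream bytes
// 0x12 0x34 (a big-endian u2 with value 0x1234), load_unsigned_short yields
// 0x00003412 in reg (x86 is little-endian), bswapl turns that into
// 0x12340000, and shrl(reg, 16) leaves the intended index 0x1234.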

void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
                                                       int bcp_offset,
                                                       size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    load_unsigned_short(index, Address(_bcp_register, bcp_offset));
  } else if (index_size == sizeof(u4)) {
    movl(index, Address(_bcp_register, bcp_offset));
    // Check if the secondary index definition is still ~x, otherwise
    // we have to change the following assembler code to calculate the
    // plain index.
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
    notl(index); // convert to plain index
  } else if (index_size == sizeof(u1)) {
    load_unsigned_byte(index, Address(_bcp_register, bcp_offset));
  } else {
    ShouldNotReachHere();
  }
}

void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
                                                           Register index,
                                                           int bcp_offset,
                                                           size_t index_size) {
  assert_different_registers(cache, index);
  get_cache_index_at_bcp(index, bcp_offset, index_size);
  movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
  // convert from field index to ConstantPoolCacheEntry index
  assert(exact_log2(in_words(ConstantPoolCacheEntry::size())) == 2, "else change next line");
  shll(index, 2);
}

void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register index,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
  // We use a 32-bit load here since the layout of 64-bit words on
  // little-endian machines allows this.
  movl(bytecode, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
  const int shift_count = (1 + byte_no) * BitsPerByte;
  assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
         (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
         "correct shift count");
  shrl(bytecode, shift_count);
  assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
  andl(bytecode, ConstantPoolCacheEntry::bytecode_1_mask);
}

void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                               Register tmp,
                                                               int bcp_offset,
                                                               size_t index_size) {
  assert_different_registers(cache, tmp);

  get_cache_index_at_bcp(tmp, bcp_offset, index_size);
  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
  // convert from field index to ConstantPoolCacheEntry index
  // and from word offset to byte offset
  assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
  shll(tmp, 2 + LogBytesPerWord);
  movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
  // skip past the header
  addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
  addptr(cache, tmp); // construct pointer to cache entry
}

// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result,
                                                                 Register index,
                                                                 Register tmp) {
  assert_different_registers(result, index);

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  movptr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
  movptr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
  resolve_oop_handle(result, tmp);
  load_heap_oop(result, Address(result, index,
                                UseCompressedOops ? Address::times_4 : Address::times_ptr,
                                arrayOopDesc::base_offset_in_bytes(T_OBJECT)), tmp);
}

// load cpool->resolved_klass_at(index)
void InterpreterMacroAssembler::load_resolved_klass_at_index(Register klass,
                                                             Register cpool,
                                                             Register index) {
  assert_different_registers(cpool, index);

  movw(index, Address(cpool, index, Address::times_ptr, sizeof(ConstantPool)));
  Register resolved_klasses = cpool;
  movptr(resolved_klasses, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes()));
  movptr(klass, Address(resolved_klasses, index, Address::times_ptr, Array<Klass*>::base_offset_in_bytes()));
}

void InterpreterMacroAssembler::load_resolved_method_at_index(int byte_no,
                                                              Register method,
                                                              Register cache,
                                                              Register index) {
  assert_different_registers(cache, index);

  const int method_offset = in_bytes(
    ConstantPoolCache::base_offset() +
      ((byte_no == TemplateTable::f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()));

  movptr(method, Address(cache, index, Address::times_ptr, method_offset)); // get f1 Method*
}
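
// Worked example of the index scaling in get_cache_entry_pointer_at_bcp
// above, assuming a 64-bit build (wordSize == 8, LogBytesPerWord == 3): a
// ConstantPoolCacheEntry is 4 words == 32 bytes, so field index 5 shifted
// left by 2 + LogBytesPerWord == 5 bits gives byte offset 5 * 32 == 160;
// adding ConstantPoolCache::base_offset() then skips the cache header.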

// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
// Args:
//   rax: superklass
//   Rsub_klass: subklass
//
// Kills:
//   rcx, rdi
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype) {
  assert(Rsub_klass != rax, "rax holds superklass");
  LP64_ONLY(assert(Rsub_klass != r14, "r14 holds locals");)
  LP64_ONLY(assert(Rsub_klass != r13, "r13 holds bcp");)
  assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
  assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi

  // Do the check.
  check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx

  // Profile the failure of the check.
  profile_typecheck_failed(rcx); // blows rcx
}


#ifndef _LP64
void InterpreterMacroAssembler::f2ieee() {
  if (IEEEPrecision) {
    fstp_s(Address(rsp, 0));
    fld_s(Address(rsp, 0));
  }
}


void InterpreterMacroAssembler::d2ieee() {
  if (IEEEPrecision) {
    fstp_d(Address(rsp, 0));
    fld_d(Address(rsp, 0));
  }
}
#endif // _LP64

// Java Expression Stack

void InterpreterMacroAssembler::pop_ptr(Register r) {
  pop(r);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  push(r);
}

void InterpreterMacroAssembler::push_i(Register r) {
  push(r);
}

void InterpreterMacroAssembler::push_i_or_ptr(Register r) {
  push(r);
}

void InterpreterMacroAssembler::push_f(XMMRegister r) {
  subptr(rsp, wordSize);
  movflt(Address(rsp, 0), r);
}

void InterpreterMacroAssembler::pop_f(XMMRegister r) {
  movflt(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void InterpreterMacroAssembler::push_d(XMMRegister r) {
  subptr(rsp, 2 * wordSize);
  movdbl(Address(rsp, 0), r);
}

void InterpreterMacroAssembler::pop_d(XMMRegister r) {
  movdbl(r, Address(rsp, 0));
  addptr(rsp, 2 * Interpreter::stackElementSize);
}

#ifdef _LP64
void InterpreterMacroAssembler::pop_i(Register r) {
  // XXX can't use pop currently, upper half non clean
  movl(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void InterpreterMacroAssembler::pop_l(Register r) {
  movq(r, Address(rsp, 0));
  addptr(rsp, 2 * Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_l(Register r) {
  subptr(rsp, 2 * wordSize);
  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(0)), r);
  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(1)), NULL_WORD);
}

void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos: pop_ptr();          break;
    case btos:
    case ztos:
    case ctos:
    case stos:
    case itos: pop_i();            break;
    case ltos: pop_l();            break;
    case ftos: pop_f(xmm0);        break;
    case dtos: pop_d(xmm0);        break;
    case vtos: /* nothing to do */ break;
    default:   ShouldNotReachHere();
  }
  interp_verify_oop(rax, state);
}

void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(rax, state);
  switch (state) {
    case atos: push_ptr();         break;
    case btos:
    case ztos:
    case ctos:
    case stos:
    case itos: push_i();           break;
    case ltos: push_l();           break;
    case ftos: push_f(xmm0);       break;
    case dtos: push_d(xmm0);       break;
    case vtos: /* nothing to do */ break;
    default  : ShouldNotReachHere();
  }
}
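
// A note on the two-slot longs above (64-bit build): push_l reserves two
// expression-stack slots, stores the 64-bit value in the first and zeroes
// the second, while pop_l reads the value and releases both slots
// (2 * Interpreter::stackElementSize). This preserves the JVM's rule that
// long (and double) values occupy two stack slots even where one machine
// word would hold them.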
#else
void InterpreterMacroAssembler::pop_i(Register r) {
  pop(r);
}

void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
  pop(lo);
  pop(hi);
}

void InterpreterMacroAssembler::pop_f() {
  fld_s(Address(rsp, 0));
  addptr(rsp, 1 * wordSize);
}

void InterpreterMacroAssembler::pop_d() {
  fld_d(Address(rsp, 0));
  addptr(rsp, 2 * wordSize);
}


void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos: pop_ptr(rax);       break;
    case btos:                     // fall through
    case ztos:                     // fall through
    case ctos:                     // fall through
    case stos:                     // fall through
    case itos: pop_i(rax);         break;
    case ltos: pop_l(rax, rdx);    break;
    case ftos:
      if (UseSSE >= 1) {
        pop_f(xmm0);
      } else {
        pop_f();
      }
      break;
    case dtos:
      if (UseSSE >= 2) {
        pop_d(xmm0);
      } else {
        pop_d();
      }
      break;
    case vtos: /* nothing to do */ break;
    default  : ShouldNotReachHere();
  }
  interp_verify_oop(rax, state);
}


void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
  push(hi);
  push(lo);
}

void InterpreterMacroAssembler::push_f() {
  // Do not schedule for no AGI! Never write beyond rsp!
  subptr(rsp, 1 * wordSize);
  fstp_s(Address(rsp, 0));
}

void InterpreterMacroAssembler::push_d() {
  // Do not schedule for no AGI! Never write beyond rsp!
  subptr(rsp, 2 * wordSize);
  fstp_d(Address(rsp, 0));
}


void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(rax, state);
  switch (state) {
    case atos: push_ptr(rax);      break;
    case btos:                     // fall through
    case ztos:                     // fall through
    case ctos:                     // fall through
    case stos:                     // fall through
    case itos: push_i(rax);        break;
    case ltos: push_l(rax, rdx);   break;
    case ftos:
      if (UseSSE >= 1) {
        push_f(xmm0);
      } else {
        push_f();
      }
      break;
    case dtos:
      if (UseSSE >= 2) {
        push_d(xmm0);
      } else {
        push_d();
      }
      break;
    case vtos: /* nothing to do */ break;
    default  : ShouldNotReachHere();
  }
}
#endif // _LP64


// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
}


void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
  // set sender sp
  lea(_bcp_register, Address(rsp, wordSize));
  // record last_sp
  movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), _bcp_register);
}


// Jump to from_interpreted entry of a call unless single stepping is possible
// in this thread in which case we must call the i2i entry
void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
  prepare_to_jump_from_interpreted();

  if (JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled. Check here for
    // interp_only_mode if these events CAN be enabled.
    // interp_only is an int, on little endian it is sufficient to test the byte only
    // Is a cmpl faster?
    LP64_ONLY(temp = r15_thread;)
    NOT_LP64(get_thread(temp);)
    cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
    jccb(Assembler::zero, run_compiled_code);
    jmp(Address(method, Method::interpreter_entry_offset()));
    bind(run_compiled_code);
  }

  jmp(Address(method, Method::from_interpreted_offset()));
}

// The following two routines provide a hook so that an implementation
// can schedule the dispatch in two parts. x86 does not do this.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
  // Nothing x86 specific to be done here
}

void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
  dispatch_next(state, step);
}

void InterpreterMacroAssembler::dispatch_base(TosState state,
                                              address* table,
                                              bool verifyoop,
                                              bool generate_poll) {
  verify_FPU(1, state);
  if (VerifyActivationFrameSize) {
    Label L;
    mov(rcx, rbp);
    subptr(rcx, rsp);
    int32_t min_frame_size =
      (frame::link_offset - frame::interpreter_frame_initial_sp_offset) *
      wordSize;
    cmpptr(rcx, (int32_t)min_frame_size);
    jcc(Assembler::greaterEqual, L);
    stop("broken stack frame");
    bind(L);
  }
  if (verifyoop) {
    interp_verify_oop(rax, state);
  }

  address* const safepoint_table = Interpreter::safept_table(state);
#ifdef _LP64
  Label no_safepoint, dispatch;
  if (table != safepoint_table && generate_poll) {
    NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
    testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());

    jccb(Assembler::zero, no_safepoint);
    lea(rscratch1, ExternalAddress((address)safepoint_table));
    jmpb(dispatch);
  }

  bind(no_safepoint);
  lea(rscratch1, ExternalAddress((address)table));
  bind(dispatch);
  jmp(Address(rscratch1, rbx, Address::times_8));

#else
  Address index(noreg, rbx, Address::times_ptr);
  if (table != safepoint_table && generate_poll) {
    NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
    Label no_safepoint;
    const Register thread = rcx;
    get_thread(thread);
    testb(Address(thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());

    jccb(Assembler::zero, no_safepoint);
    ArrayAddress dispatch_addr(ExternalAddress((address)safepoint_table), index);
    jump(dispatch_addr);
    bind(no_safepoint);
  }

  {
    ArrayAddress dispatch_addr(ExternalAddress((address)table), index);
    jump(dispatch_addr);
  }
#endif // _LP64
}

void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll) {
  dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
}

void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}

void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state), false);
}


void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
  // load next bytecode (load before advancing _bcp_register to prevent AGI)
  load_unsigned_byte(rbx, Address(_bcp_register, step));
  // advance _bcp_register
  increment(_bcp_register, step);
  dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
}
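
// Worked example of the table dispatch above (64-bit path): dispatch_next
// loads the next opcode into rbx, so for iload_0 (opcode 0x1a) the indirect
// jmp(Address(rscratch1, rbx, Address::times_8)) jumps through table[0x1a],
// the generated template for that bytecode in the TosState-specific
// dispatch table.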

void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  load_unsigned_byte(rbx, Address(_bcp_register, 0));
  dispatch_base(state, table);
}

void InterpreterMacroAssembler::narrow(Register result) {

  // Get method->_constMethod->_result_type
  movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
  movptr(rcx, Address(rcx, Method::const_offset()));
  load_unsigned_byte(rcx, Address(rcx, ConstMethod::result_type_offset()));

  Label done, notBool, notByte, notChar;

  // common case first
  cmpl(rcx, T_INT);
  jcc(Assembler::equal, done);

  // mask integer result to narrower return type.
  cmpl(rcx, T_BOOLEAN);
  jcc(Assembler::notEqual, notBool);
  andl(result, 0x1);
  jmp(done);

  bind(notBool);
  cmpl(rcx, T_BYTE);
  jcc(Assembler::notEqual, notByte);
  LP64_ONLY(movsbl(result, result);)
  NOT_LP64(shll(result, 24);)      // truncate upper 24 bits
  NOT_LP64(sarl(result, 24);)      // and sign-extend byte
  jmp(done);

  bind(notByte);
  cmpl(rcx, T_CHAR);
  jcc(Assembler::notEqual, notChar);
  LP64_ONLY(movzwl(result, result);)
  NOT_LP64(andl(result, 0xFFFF);)  // truncate upper 16 bits
  jmp(done);

  bind(notChar);
  // cmpl(rcx, T_SHORT);  // all that's left
  // jcc(Assembler::notEqual, done);
  LP64_ONLY(movswl(result, result);)
  NOT_LP64(shll(result, 16);)      // truncate upper 16 bits
  NOT_LP64(sarl(result, 16);)      // and sign-extend short

  // Nothing to do for T_INT
  bind(done);
}
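
// Worked example of the 32-bit byte narrowing above: for result 0x000001FF,
// which a byte-returning method must treat as -1, shll(result, 24) gives
// 0xFF000000 and sarl(result, 24) gives 0xFFFFFFFF, i.e. the sign-extended
// byte value -1. movsbl achieves the same in one instruction on 64-bit.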

// remove activation
//
// Apply stack watermark barrier.
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(
        TosState state,
        Register ret_addr,
        bool throw_monitor_exception,
        bool install_monitor_exception,
        bool notify_jvmdi) {
  // Note: Registers rdx xmm0 may be in use for the
  // result check if synchronized method
  Label unlocked, unlock, no_unlock;

  const Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  const Register robj    = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
  const Register rmon    = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
  // monitor pointers need different register
  // because rdx may have the result in it
  NOT_LP64(get_thread(rthread);)

  // The below poll is for the stack watermark barrier. It allows frames that
  // would normally not be safe to use to be fixed up lazily. Such returns
  // into unsafe territory of the stack will call InterpreterRuntime::at_unwind.
  Label slow_path;
  Label fast_path;
  safepoint_poll(slow_path, rthread, true /* at_return */, false /* in_nmethod */);
  jmp(fast_path);
  bind(slow_path);
  push(state);
  set_last_Java_frame(rthread, noreg, rbp, (address)pc());
  super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread);
  NOT_LP64(get_thread(rthread);) // call_VM clobbered it, restore
  reset_last_Java_frame(rthread, true);
  pop(state);
  bind(fast_path);

  // get the value of _do_not_unlock_if_synchronized into rdx
  const Address do_not_unlock_if_synchronized(rthread,
    in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  movbool(rbx, do_not_unlock_if_synchronized);
  movbool(do_not_unlock_if_synchronized, false); // reset the flag

  // get method access flags
  movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
  movl(rcx, Address(rcx, Method::access_flags_offset()));
  testl(rcx, JVM_ACC_SYNCHRONIZED);
  jcc(Assembler::zero, unlocked);

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  testbool(rbx);
  jcc(Assembler::notZero, no_unlock);

  // unlock monitor
  push(state); // save result

  // BasicObjectLock will be first in list, since this is a
  // synchronized method. However, need to check that the object has
  // not been unlocked by an explicit monitorexit bytecode.
  const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
                        wordSize - (int) sizeof(BasicObjectLock));
  // We use c_rarg1/rdx so that if we go slow path it will be the correct
  // register for unlock_object to pass to VM directly
  lea(robj, monitor); // address of first monitor

  movptr(rax, Address(robj, BasicObjectLock::obj_offset_in_bytes()));
  testptr(rax, rax);
  jcc(Assembler::notZero, unlock);

  pop(state);
  if (throw_monitor_exception) {
    // Entry already unlocked, need to throw exception
    NOT_LP64(empty_FPU_stack();)  // remove possible return value from FPU-stack, otherwise stack could overflow
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll. If requested,
    // install an illegal_monitor_state_exception. Continue with
    // stack unrolling.
    if (install_monitor_exception) {
      NOT_LP64(empty_FPU_stack();)
      call_VM(noreg, CAST_FROM_FN_PTR(address,
                     InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    jmp(unlocked);
  }

  bind(unlock);
  unlock_object(robj);
  pop(state);

  // Check for block-structured locking (i.e., that all locked
  // objects have been unlocked)
  bind(unlocked);

  // rax, rdx: Might contain return value

  // Check that all monitors are unlocked
  {
    Label loop, exception, entry, restart;
    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
    const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
    const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);

    bind(restart);
    // We use c_rarg1 so that if we go slow path it will be the correct
    // register for unlock_object to pass to VM directly
    movptr(rmon, monitor_block_top); // points to current entry, starting
                                     // with top-most entry
    lea(rbx, monitor_block_bot);     // points to word before bottom of
                                     // monitor block
    jmp(entry);

    // Entry already locked, need to throw exception
    bind(exception);

    if (throw_monitor_exception) {
      // Throw exception
      NOT_LP64(empty_FPU_stack();)
      MacroAssembler::call_VM(noreg,
                              CAST_FROM_FN_PTR(address, InterpreterRuntime::
                                               throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame.
      // We don't have to preserve c_rarg1 since we are going to throw an exception.

      push(state);
      mov(robj, rmon); // nop if robj and rmon are the same
      unlock_object(robj);
      pop(state);

      if (install_monitor_exception) {
        NOT_LP64(empty_FPU_stack();)
        call_VM(noreg, CAST_FROM_FN_PTR(address,
                                        InterpreterRuntime::
                                        new_illegal_monitor_state_exception));
      }

      jmp(restart);
    }

    bind(loop);
    // check if current entry is used
    cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL);
    jcc(Assembler::notEqual, exception);

    addptr(rmon, entry_size); // otherwise advance to next entry
    bind(entry);
    cmpptr(rmon, rbx); // check if bottom reached
    jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
  }

  bind(no_unlock);

  // jvmti support
  if (notify_jvmdi) {
    notify_method_exit(state, NotifyJVMTI);     // preserve TOSCA
  } else {
    notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
  }

  // remove activation
  // get sender sp
  movptr(rbx,
         Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
  if (StackReservedPages > 0) {
    // testing if reserved zone needs to be re-enabled
    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
    Label no_reserved_zone_enabling;

    NOT_LP64(get_thread(rthread);)

    cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_enabled);
    jcc(Assembler::equal, no_reserved_zone_enabling);

    cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
    jcc(Assembler::lessEqual, no_reserved_zone_enabling);

    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::throw_delayed_StackOverflowError));
    should_not_reach_here();

    bind(no_reserved_zone_enabling);
  }
  leave();       // remove frame anchor
  pop(ret_addr); // get return address
  mov(rsp, rbx); // set sp to sender sp
}

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::notZero, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}


// Lock object
//
// Args:
//      rdx, c_rarg1: BasicObjectLock to be used for locking
//
// Kills:
//      rax, rbx
void InterpreterMacroAssembler::lock_object(Register lock_reg) {
  assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
         "The argument is only for looks. It must be c_rarg1");

  if (UseHeavyMonitors) {
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);
  } else {
    Label done;

    const Register swap_reg = rax; // Must use rax for cmpxchg instruction
    const Register tmp_reg = rbx;  // Will be passed to biased_locking_enter to avoid a
                                   // problematic case where tmp_reg = no_reg.
    const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
    const Register rklass_decode_tmp = LP64_ONLY(rscratch1) NOT_LP64(noreg);

    const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
    const int lock_offset = BasicObjectLock::lock_offset_in_bytes();
    const int mark_offset = lock_offset +
                            BasicLock::displaced_header_offset_in_bytes();

    Label slow_case;

    // Load object pointer into obj_reg
    movptr(obj_reg, Address(lock_reg, obj_offset));

    if (DiagnoseSyncOnValueBasedClasses != 0) {
      load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
      movl(tmp_reg, Address(tmp_reg, Klass::access_flags_offset()));
      testl(tmp_reg, JVM_ACC_IS_VALUE_BASED_CLASS);
      jcc(Assembler::notZero, slow_case);
    }

    if (UseBiasedLocking) {
      biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, rklass_decode_tmp, false, done, &slow_case);
    }

    // Load immediate 1 into swap_reg %rax
    movl(swap_reg, (int32_t)1);

    // Load (object->mark() | 1) into swap_reg %rax
    orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

    // Save (object->mark() | 1) into BasicLock's displaced header
    movptr(Address(lock_reg, mark_offset), swap_reg);

    assert(lock_offset == 0,
           "displaced header must be first word in BasicObjectLock");

    lock();
    cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    if (PrintBiasedLockingStatistics) {
      cond_inc32(Assembler::zero,
                 ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
    }
    jcc(Assembler::zero, done);

    const int zero_bits = LP64_ONLY(7) NOT_LP64(3);

    // Fast check for recursive lock.
    //
    // Can apply the optimization only if this is a stack lock
    // allocated in this thread. For efficiency, we can focus on
    // recently allocated stack locks (instead of reading the stack
    // base and checking whether 'mark' points inside the current
    // thread stack):
    //  1) (mark & zero_bits) == 0, and
    //  2) rsp <= mark < mark + os::pagesize()
    //
    // Warning: rsp + os::pagesize can overflow the stack base. We must
    // neither apply the optimization for an inflated lock allocated
    // just above the thread stack (this is why condition 1 matters)
    // nor apply the optimization if the stack lock is inside the stack
    // of another thread. The latter is avoided even in case of overflow
    // because we have guard pages at the end of all stacks. Hence, if
    // we go over the stack base and hit the stack of another thread,
    // this should not be in a writeable area that could contain a
    // stack lock allocated by that thread. As a consequence, a stack
    // lock less than page size away from rsp is guaranteed to be
    // owned by the current thread.
    //
    // These 3 tests can be done by evaluating the following
    // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
    // assuming both stack pointer and pagesize have their
    // least significant bits clear.
    // NOTE: the mark is in swap_reg %rax as the result of cmpxchg
    subptr(swap_reg, rsp);
    andptr(swap_reg, zero_bits - os::vm_page_size());

    // Save the test result, for recursive case, the result is zero
    movptr(Address(lock_reg, mark_offset), swap_reg);

    if (PrintBiasedLockingStatistics) {
      cond_inc32(Assembler::zero,
                 ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
    }
    jcc(Assembler::zero, done);

    bind(slow_case);

    // Call the runtime routine for slow case
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);

    bind(done);
  }
}
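
// Worked example of the recursive-lock test above, assuming a 64-bit build
// (zero_bits == 7) and a 4K page (os::vm_page_size() == 4096): the mask is
// 7 - 4096 == -4089, i.e. all high bits set plus the low bits 0x007. If mark
// is a stack lock the current thread just pushed, mark - rsp is small (say
// 0x40) and 8-byte aligned, so the AND yields zero and the recursive case is
// taken. For an inflated lock, mark has one of its low three bits set, so
// the AND with ...007 is non-zero; for a pointer outside [rsp, rsp + page),
// the high bits survive the mask. Either way the code falls into slow_case.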


// Unlocks an object. Used in monitorexit bytecode and
// remove_activation. Throws an IllegalMonitorStateException if object is
// not locked by current thread.
//
// Args:
//      rdx, c_rarg1: BasicObjectLock for lock
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
//      rscratch1 (scratch reg)
//      rax, rbx, rcx, rdx
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
         "The argument is only for looks. It must be c_rarg1");

  if (UseHeavyMonitors) {
    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
  } else {
    Label done;

    const Register swap_reg   = rax; // Must use rax for cmpxchg instruction
    const Register header_reg = LP64_ONLY(c_rarg2) NOT_LP64(rbx); // Will contain the old oopMark
    const Register obj_reg    = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop

    save_bcp(); // Save in case of exception

    // Convert from BasicObjectLock structure to object and BasicLock
    // structure. Store the BasicLock address into %rax
    lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));

    // Load oop into obj_reg(%c_rarg3)
    movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));

    // Free entry
    movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);

    if (UseBiasedLocking) {
      biased_locking_exit(obj_reg, header_reg, done);
    }

    // Load the old header from BasicLock structure
    movptr(header_reg, Address(swap_reg,
                               BasicLock::displaced_header_offset_in_bytes()));

    // Test for recursion
    testptr(header_reg, header_reg);

    // zero for recursive case
    jcc(Assembler::zero, done);

    // Atomic swap back the old header
    lock();
    cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

    // zero for simple unlock of a stack-lock case
    jcc(Assembler::zero, done);


    // Call the runtime routine for slow case.
    movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), obj_reg); // restore obj
    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);

    bind(done);

    restore_bcp();
  }
}

void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
                                                         Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  movptr(mdp, Address(rbp, frame::interpreter_frame_mdp_offset * wordSize));
  testptr(mdp, mdp);
  jcc(Assembler::zero, zero_continue);
}


// Set the method data pointer for the current bcp.
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label set_mdp;
  push(rax);
  push(rbx);

  get_method(rbx);
  // Test MDO to avoid the call if it is NULL.
  movptr(rax, Address(rbx, in_bytes(Method::method_data_offset())));
  testptr(rax, rax);
  jcc(Assembler::zero, set_mdp);
  // rbx: method
  // _bcp_register: bcp
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, _bcp_register);
  // rax: mdi
  // mdo is guaranteed to be non-zero here, we checked for it before the call.
  movptr(rbx, Address(rbx, in_bytes(Method::method_data_offset())));
  addptr(rbx, in_bytes(MethodData::data_offset()));
  addptr(rax, rbx);
  bind(set_mdp);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), rax);
  pop(rbx);
  pop(rax);
}
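
// A note on the mdp computed above: InterpreterRuntime::bcp_to_di returns
// the data index (di) of the ProfileData entry for the current bcp, so the
// stored method data pointer is mdp = MDO + MethodData::data_offset() + di,
// i.e. a direct pointer into the method's profiling data area that the
// profile_* helpers below can update without further lookups.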

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  push(rax);
  push(rbx);
  Register arg3_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
  Register arg2_reg = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
  push(arg3_reg);
  push(arg2_reg);
  test_method_data_pointer(arg3_reg, verify_continue); // If mdp is zero, continue
  get_method(rbx);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp. The converse is highly probable also.
  load_unsigned_short(arg2_reg,
                      Address(arg3_reg, in_bytes(DataLayout::bci_offset())));
  addptr(arg2_reg, Address(rbx, Method::const_offset()));
  lea(arg2_reg, Address(arg2_reg, ConstMethod::codes_offset()));
  cmpptr(arg2_reg, _bcp_register);
  jcc(Assembler::equal, verify_continue);
  // rbx: method
  // _bcp_register: bcp
  // c_rarg3: mdp
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
               rbx, _bcp_register, arg3_reg);
  bind(verify_continue);
  pop(arg2_reg);
  pop(arg3_reg);
  pop(rbx);
  pop(rax);
#endif // ASSERT
}


void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
                                                int constant,
                                                Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address data(mdp_in, constant);
  movptr(data, value);
}


void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      int constant,
                                                      bool decrement) {
  // Counter address
  Address data(mdp_in, constant);

  increment_mdp_data_at(data, decrement);
}

void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // %%% this does 64bit counters at best it is wasting space
  // at worst it is a rare bug when counters overflow

  if (decrement) {
    // Decrement the register. Set condition codes.
    addptr(data, (int32_t) -DataLayout::counter_increment);
    // If the decrement causes the counter to overflow, stay negative
    Label L;
    jcc(Assembler::negative, L);
    addptr(data, (int32_t) DataLayout::counter_increment);
    bind(L);
  } else {
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    // Increment the register. Set carry flag.
    addptr(data, DataLayout::counter_increment);
    // If the increment causes the counter to overflow, pull back by 1.
    sbbptr(data, (int32_t)0);
  }
}
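
// Worked example of the branch-free saturating increment above: if the
// counter holds its maximum value (all bits set), addptr wraps it to 0 and
// sets the carry flag, and sbbptr(data, 0) then subtracts 0 + CF == 1,
// restoring the all-ones value. In the common non-overflow case no carry is
// produced and the sbb subtracts nothing, so the counter saturates instead
// of wrapping, without a branch.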


void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      Register reg,
                                                      int constant,
                                                      bool decrement) {
  Address data(mdp_in, reg, Address::times_1, constant);

  increment_mdp_data_at(data, decrement);
}

void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
                                                int flag_byte_constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  int header_offset = in_bytes(DataLayout::flags_offset());
  int header_bits = flag_byte_constant;
  // Set the flag
  orb(Address(mdp_in, header_offset), header_bits);
}



void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
                                                 int offset,
                                                 Register value,
                                                 Register test_value_out,
                                                 Label& not_equal_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  if (test_value_out == noreg) {
    cmpptr(value, Address(mdp_in, offset));
  } else {
    // Put the test value into a register, so caller can use it:
    movptr(test_value_out, Address(mdp_in, offset));
    cmpptr(test_value_out, value);
  }
  jcc(Assembler::notEqual, not_equal_continue);
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address disp_address(mdp_in, offset_of_disp);
  addptr(mdp_in, disp_address);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     Register reg,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp);
  addptr(mdp_in, disp_address);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
}


void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
                                                       int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  addptr(mdp_in, constant);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
}


void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  push(return_bci); // save/restore across call_VM
  call_VM(noreg,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
          return_bci);
  pop(return_bci);
}


void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
                                                     Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    // Otherwise, assign to mdp
    test_method_data_pointer(mdp, profile_continue);
    // We are taking a branch. Increment the taken count.
    // We inline increment_mdp_data_at to return bumped_count in a register
    //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
    Address data(mdp, in_bytes(JumpData::taken_offset()));
    movptr(bumped_count, data);
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    addptr(bumped_count, DataLayout::counter_increment);
    sbbptr(bumped_count, 0);
    movptr(data, bumped_count); // Store back out

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch. Increment the not taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_final_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp,
                           in_bytes(VirtualCallData::
                                    virtual_call_data_size()));
    bind(profile_continue);
  }
}
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                     Register mdp,
                                                     Register reg2,
                                                     bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      testptr(receiver, receiver);
      jccb(Assembler::notZero, not_null);
      // We are making a call. Increment the count for the null receiver.
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
      jmp(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, mdp, reg2, true);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}

// This routine creates a state machine for updating the multi-row
// type profile at a virtual call site (or other type-sensitive bytecode).
// The machine visits each row (of receiver/count) until the receiver type
// is found, or until it runs out of rows. At the same time, it remembers
// the location of the first empty row. (An empty row records null for its
// receiver, and can be allocated for a newly-observed receiver type.)
// Because there are two degrees of freedom in the state, a simple linear
// search will not work; it must be a decision tree. Hence this helper
// function is recursive, to generate the required tree-structured code.
// It's the interpreter, so we are trading off code space for speed.
// See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                        Register receiver, Register mdp,
                                        Register reg2, int start_row,
                                        Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    if (is_virtual_call) {
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
      increment_mdp_data_at(mdp, in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset()));
    }
#endif // INCLUDE_JVMCI
  } else {
    int non_profiled_offset = -1;
    if (is_virtual_call) {
      non_profiled_offset = in_bytes(CounterData::count_offset());
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
      non_profiled_offset = in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset());
    }
#endif // INCLUDE_JVMCI

    record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
                                  &VirtualCallData::receiver_offset,
                                  &VirtualCallData::receiver_count_offset,
                                  non_profiled_offset);
  }
}
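// A note on code size: record_item_in_profile_helper below is recursive,
// and each recursion level re-emits the row tests for the remaining rows,
// so for N = TypeProfileWidth rows roughly N*(N+1)/2 compare-and-branch
// sequences are generated in total. That is acceptable only because N is
// small; the worked example further below shows the shape for N == 3.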
void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp,
                                        Register reg2, int start_row, Label& done, int total_rows,
                                        OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn,
                                        int non_profiled_offset) {
  int last_row = total_rows - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the item and for null.
  // Take any of three different outcomes:
  //   1. found item => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the item is item[n].
    int item_offset = in_bytes(item_offset_fn(row));
    test_mdp_data_at(mdp, item_offset, item,
                     (test_for_null_also ? reg2 : noreg),
                     next_test);
    // (Reg2 now contains the item from the CallData.)

    // The item is item[n]. Increment count[n].
    int count_offset = in_bytes(item_count_offset_fn(row));
    increment_mdp_data_at(mdp, count_offset);
    jmp(done);
    bind(next_test);

    if (test_for_null_also) {
      // Failed the equality check on item[n]... Test for null.
      testptr(reg2, reg2);
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (non_profiled_offset >= 0) {
          Label found_null;
          jccb(Assembler::zero, found_null);
          // Item did not match any saved item and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(mdp, non_profiled_offset);
          jmp(done);
          bind(found_null);
        } else {
          jcc(Assembler::notZero, done);
        }
        break;
      }
      Label found_null;
      // Since null is rare, make it be the branch-taken case.
      jcc(Assembler::zero, found_null);

      // Put all the "Case 3" tests here.
      record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
                                    item_offset_fn, item_count_offset_fn, non_profiled_offset);

      // Found a null. Keep searching for a matching item,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching item, but we
  // observed that item[start_row] is NULL.

  // Fill in the item field and increment the count.
  int item_offset = in_bytes(item_offset_fn(start_row));
  set_mdp_data_at(mdp, item_offset, item);
  int count_offset = in_bytes(item_count_offset_fn(start_row));
  movl(reg2, DataLayout::counter_increment);
  set_mdp_data_at(mdp, count_offset, reg2);
  if (start_row > 0) {
    jmp(done);
  }
}

// Example state machine code for three profile rows:
//   // main copy of decision tree, rooted at row[0]
//   if (row[0].rec == rec) { row[0].incr(); goto done; }
//   if (row[0].rec != NULL) {
//     // inner copy of decision tree, rooted at row[1]
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[1].rec != NULL) {
//       // degenerate decision tree, rooted at row[2]
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
//       row[2].init(rec); goto done;
//     } else {
//       // remember row[1] is empty
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       row[1].init(rec); goto done;
//     }
//   } else {
//     // remember row[0] is empty
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[2].rec == rec) { row[2].incr(); goto done; }
//     row[0].init(rec); goto done;
//   }
//   done:

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register mdp, Register reg2,
                                                        bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);

  bind(done);
}
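// Sketch of what profile_ret below emits for a ret bytecode, in pseudocode
// (assuming RetData keeps row_limit() cached (bci, count, displacement)
// rows; see methodData.hpp for the real layout):
//
//   count++;
//   for (row = 0; row < row_limit; row++)   // unrolled at assembly time
//     if (return_bci == bci[row]) {
//       bci_count[row]++; mdp += bci_displacement[row]; goto done;
//     }
//   call InterpreterRuntime::update_mdp_for_ret(return_bci);  // slow path
// done: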
void InterpreterMacroAssembler::profile_ret(Register return_bci,
                                            Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(mdp,
                       in_bytes(RetData::bci_offset(row)),
                       return_bci, noreg,
                       next_test);

      // return_bci is equal to bci[n]. Increment the count.
      increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(mdp,
                           in_bytes(RetData::bci_displacement_offset(row)));
      jmp(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(return_bci);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) {
  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());

    // *Decrement* the counter. We expect to see zero or small negatives.
    increment_mdp_data_at(mdp, count_offset, true);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, mdp, reg2, false);
      NOT_LP64(assert(reg2 == rdi, "we know how to fix this blown reg");)
      NOT_LP64(restore_locals();) // Restore EDI
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the default case count.
    increment_mdp_data_at(mdp,
                          in_bytes(MultiBranchData::default_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         in_bytes(MultiBranchData::
                                  default_displacement_offset()));

    bind(profile_continue);
  }
}
void InterpreterMacroAssembler::profile_switch_case(Register index,
                                                    Register mdp,
                                                    Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Build the base (index * per_case_size_in_bytes()) +
    // case_array_offset_in_bytes().
    movl(reg2, in_bytes(MultiBranchData::per_case_size()));
    imulptr(index, reg2); // XXX l ?
    addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?

    // Update the case count.
    increment_mdp_data_at(mdp,
                          index,
                          in_bytes(MultiBranchData::relative_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         index,
                         in_bytes(MultiBranchData::
                                  relative_displacement_offset()));

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
  if (state == atos) {
    MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
  }
}

void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
#ifndef _LP64
  if ((state == ftos && UseSSE < 1) ||
      (state == dtos && UseSSE < 2)) {
    MacroAssembler::verify_FPU(stack_depth);
  }
#endif
}

// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, Address mask,
                                                        Register scratch, bool preloaded,
                                                        Condition cond, Label* where) {
  if (!preloaded) {
    movl(scratch, counter_addr);
  }
  incrementl(scratch, increment);
  movl(counter_addr, scratch);
  andl(scratch, mask);
  if (where != NULL) {
    jcc(cond, *where);
  }
}

void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI puts a thread in interp_only_mode, method entry/exit
  // events are sent to track stack depth. If it is possible to enter
  // interp_only_mode we add the code to check if the event should be sent.
  Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    NOT_LP64(get_thread(rthread);)
    movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
    testl(rdx, rdx);
    jcc(Assembler::zero, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::post_method_entry));
    bind(L);
  }

  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    NOT_LP64(get_thread(rthread);)
    get_method(rarg);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
                 rthread, rarg);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
    NOT_LP64(get_thread(rthread);)
    get_method(rarg);
    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      rthread, rarg);
  }
}
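// A note on the DTrace blocks above and in notify_method_exit below:
// SkipIfEqual is a scoped helper that (roughly) emits a compare of the
// DTraceMethodProbes flag against the given value and a conditional jump
// over the enclosed probe call, so a disabled probe costs only a
// test-and-branch at runtime.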
void InterpreterMacroAssembler::notify_method_exit(
    TosState state, NotifyMethodExitMode mode) {
  // Whenever JVMTI puts a thread in interp_only_mode, method entry/exit
  // events are sent to track stack depth. If it is possible to enter
  // interp_only_mode we add the code to check if the event should be sent.
  Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit. If this
    // is changed then the interpreter_frame_result implementation will
    // need to be updated too.

    // The template interpreter will leave the result on the top of the stack.
    push(state);
    NOT_LP64(get_thread(rthread);)
    movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
    testl(rdx, rdx);
    jcc(Assembler::zero, L);
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    bind(L);
    pop(state);
  }

  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    push(state);
    NOT_LP64(get_thread(rthread);)
    get_method(rarg);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
                 rthread, rarg);
    pop(state);
  }
}