Path: blob/master/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp
/*
 * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"


#define __ ce->masm()->

#ifndef _LP64
float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");


  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
    __ push(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ pop(rax);
  }

  Label NaN, do_return;
  __ jccb(Assembler::parity, NaN);
  __ jccb(Assembler::below, do_return);

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorptr(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}
#endif // !_LP64

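// C1SafepointPollStub: slow path of a method-return safepoint poll. The poll
// handler needs the pc of the poll instruction to map the trap back to this
// site, so the stub records it in JavaThread::saved_exception_pc and then
// jumps to the shared polling-page return handler blob. On 32-bit x86 there
// is no r15_thread or rscratch1, so two temporaries are saved by hand.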
void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  InternalAddress safepoint_pc(ce->masm()->pc() - ce->masm()->offset() + safepoint_offset());
#ifdef _LP64
  __ lea(rscratch1, safepoint_pc);
  __ movptr(Address(r15_thread, JavaThread::saved_exception_pc_offset()), rscratch1);
#else
  const Register tmp1 = rcx;
  const Register tmp2 = rdx;
  __ push(tmp1);
  __ push(tmp2);

  __ lea(tmp1, safepoint_pc);
  __ get_thread(tmp2);
  __ movptr(Address(tmp2, JavaThread::saved_exception_pc_offset()), tmp1);

  __ pop(tmp2);
  __ pop(tmp1);
#endif /* _LP64 */
  assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
         "polling page return stub not created yet");

  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
  __ jump(RuntimeAddress(stub));
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  ce->store_parameter(m, 1);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // pass the array index on stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
    ce->store_parameter(_array->as_pointer_register(), 1);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

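// The allocation stubs below share a fixed register convention with their
// Runtime1 counterparts: the klass goes in rdx, the array length (where there
// is one) in rbx, and the new object comes back in rax. The asserts in each
// emit_code check that register allocation honored this; the jmp back to
// _continuation then rejoins the fast path with the result already in place.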
// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of MonitorAccessStubs

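// Both monitor stubs pass their operands through the stack frame via
// store_parameter, since every register must be preserved across the slow
// path. A method with no FPU code can use the cheaper _nofpu runtime entries,
// which skip saving FPU state. MonitorExitStub calls a non-blocking leaf
// routine, so unlike MonitorEnterStub it records no call info or oop map.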
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}


// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and we need to
  // make sure that we don't see a piece of the instruction. It
  // appears mostly impossible on Intel to simply invalidate other
  // processors' caches, and since they may do aggressive prefetch it's
  // very hard to make a guess about what code might be in the icache.
  // Force the instruction to be double word aligned so that it
  // doesn't span a cache line.
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

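// A rough sketch of the template emit_code produces (offsets vary with _id
// and _bytes_to_copy; the emission below is the authoritative layout):
//
//   being_initialized_entry:
//     the instruction being patched: either a copy of the original bytes at
//     _pc_start (which are nop'ed out there), or a freshly emitted
//     mov_metadata/movoop for load_klass_id/load_mirror_id
//   (load_mirror_id only) being-initialized check: if the executing thread
//     is not the class's initializing thread, branch to call_patch;
//     otherwise jmp back to _patch_site_continuation
//   patch record: 5 bytes that disassemble as "movl eax, imm32" purely for
//     readability of the disassembly: 0xB8, 0x00,
//     being_initialized_entry_offset, bytes_to_skip, _bytes_to_copy
//   call_patch:
//     call to the matching Runtime1 *_patching_id entry, then jmp
//     _patch_site_entry, nop-padded so deoptimization can overwrite the jmp
//     with a call; _pc_start itself is overwritten with a jump to this point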
void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    Metadata* o = NULL;
    __ mov_metadata(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id) {
    // produce a copy of the load mirror instruction for use by the being
    // initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    Register tmp2 = rbx;
    __ push(tmp);
    __ push(tmp2);
    // Load without verification to keep code size small. We need it because
    // being_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
    __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset()));
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
    __ pop(tmp2);
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch. We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes.
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

  __ emit_int8((unsigned char)0xB8);
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world. We cannot use fat nops here, since the concurrent
  // code rewrite may transiently create the illegal instruction sequence.
  for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
    __ nop();
  }
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

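// ArrayCopyStub: slow path of the arraycopy intrinsic. The five arguments
// follow the standard Java calling convention: stack arguments are stored
// explicitly, and the assert below checks that register arguments were
// already forced into the right registers by the LIR operands. The copy is
// then performed through an ordinary static call via the resolve stub.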
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
  SharedRuntime::java_calling_convention(signature, args, 5);

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5 ; i++ ) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr (Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg ");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif

  __ jmp(_continuation);
}

#undef __