Path: blob/master/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad
41153 views
// // Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it // under the terms of the GNU General Public License version 2 only, as // published by the Free Software Foundation. // // This code is distributed in the hope that it will be useful, but WITHOUT // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License // version 2 for more details (a copy is included in the LICENSE file that // accompanied this code). // // You should have received a copy of the GNU General Public License version // 2 along with this work; if not, write to the Free Software Foundation, // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. // // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA // or visit www.oracle.com if you need additional information or have any // questions. // source_hpp %{ #include "gc/shared/gc_globals.hpp" #include "gc/z/c2/zBarrierSetC2.hpp" #include "gc/z/zThreadLocalData.hpp" %} source %{ static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) { if (barrier_data == ZLoadBarrierElided) { return; } ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data); __ ldr(tmp, Address(rthread, ZThreadLocalData::address_bad_mask_offset())); __ andr(tmp, tmp, ref); __ cbnz(tmp, *stub->entry()); __ bind(*stub->continuation()); } static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) { ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, ZLoadBarrierStrong); __ b(*stub->entry()); __ bind(*stub->continuation()); } %} // Load Pointer instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr) %{ match(Set dst (LoadP mem)); predicate(UseZGC && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() != 0)); effect(TEMP dst, KILL cr); ins_cost(4 * INSN_COST); format %{ "ldr $dst, $mem" %} ins_encode %{ const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); __ ldr($dst$$Register, ref_addr); z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, barrier_data()); %} ins_pipe(iload_reg_mem); %} // Load Pointer Volatile instruct zLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg cr) %{ match(Set dst (LoadP mem)); predicate(UseZGC && needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0); effect(TEMP dst, KILL cr); ins_cost(VOLATILE_REF_COST); format %{ "ldar $dst, $mem\t" %} ins_encode %{ __ ldar($dst$$Register, $mem$$Register); z_load_barrier(_masm, this, Address($mem$$Register), $dst$$Register, rscratch2 /* tmp */, barrier_data()); %} ins_pipe(pipe_serial); %} instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ match(Set res (CompareAndSwapP mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); effect(KILL cr, TEMP_DEF res); ins_cost(2 * VOLATILE_REF_COST); format %{ "cmpxchg $mem, $oldval, $newval\n\t" "cset $res, EQ" %} ins_encode %{ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, false /* acquire */, true /* release */, false /* weak */, rscratch2); __ cset($res$$Register, Assembler::EQ); if (barrier_data() != ZLoadBarrierElided) { Label good; __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset())); __ andr(rscratch1, rscratch1, rscratch2); __ cbz(rscratch1, good); z_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */); __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, false /* acquire */, true /* release */, false /* weak */, rscratch2); __ cset($res$$Register, Assembler::EQ); __ bind(good); } %} ins_pipe(pipe_slow); %} instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ match(Set res (CompareAndSwapP mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); predicate(UseZGC && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong)); effect(KILL cr, TEMP_DEF res); ins_cost(2 * VOLATILE_REF_COST); format %{ "cmpxchg $mem, $oldval, $newval\n\t" "cset $res, EQ" %} ins_encode %{ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, true /* acquire */, true /* release */, false /* weak */, rscratch2); __ cset($res$$Register, Assembler::EQ); if (barrier_data() != ZLoadBarrierElided) { Label good; __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset())); __ andr(rscratch1, rscratch1, rscratch2); __ cbz(rscratch1, good); z_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */ ); __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, true /* acquire */, true /* release */, false /* weak */, rscratch2); __ cset($res$$Register, Assembler::EQ); __ bind(good); } %} ins_pipe(pipe_slow); %} instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ match(Set res (CompareAndExchangeP mem (Binary oldval newval))); predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); effect(TEMP_DEF res, KILL cr); ins_cost(2 * VOLATILE_REF_COST); format %{ "cmpxchg $res = $mem, $oldval, $newval" %} ins_encode %{ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, false /* acquire */, true /* release */, false /* weak */, $res$$Register); if (barrier_data() != ZLoadBarrierElided) { Label good; __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset())); __ andr(rscratch1, rscratch1, $res$$Register); __ cbz(rscratch1, good); z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */); __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, false /* acquire */, true /* release */, false /* weak */, $res$$Register); __ bind(good); } %} ins_pipe(pipe_slow); %} instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ match(Set res (CompareAndExchangeP mem (Binary oldval newval))); predicate(UseZGC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); effect(TEMP_DEF res, KILL cr); ins_cost(2 * VOLATILE_REF_COST); format %{ "cmpxchg $res = $mem, $oldval, $newval" %} ins_encode %{ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, true /* acquire */, true /* release */, false /* weak */, $res$$Register); if (barrier_data() != ZLoadBarrierElided) { Label good; __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset())); __ andr(rscratch1, rscratch1, $res$$Register); __ cbz(rscratch1, good); z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */); __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, true /* acquire */, true /* release */, false /* weak */, $res$$Register); __ bind(good); } %} ins_pipe(pipe_slow); %} instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ match(Set prev (GetAndSetP mem newv)); predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); effect(TEMP_DEF prev, KILL cr); ins_cost(2 * VOLATILE_REF_COST); format %{ "atomic_xchg $prev, $newv, [$mem]" %} ins_encode %{ __ atomic_xchg($prev$$Register, $newv$$Register, $mem$$Register); z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data()); %} ins_pipe(pipe_serial); %} instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ match(Set prev (GetAndSetP mem newv)); predicate(UseZGC && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() != 0)); effect(TEMP_DEF prev, KILL cr); ins_cost(VOLATILE_REF_COST); format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %} ins_encode %{ __ atomic_xchgal($prev$$Register, $newv$$Register, $mem$$Register); z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data()); %} ins_pipe(pipe_serial); %}