Path: blob/master/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp
/*
 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/universe.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"

#define __ masm->

void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register dst, Address src, Register tmp1, Register tmp_thread) {

  // LR is live.  It must be saved around calls.

  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
      if (UseCompressedOops) {
        __ ldrw(dst, src);
        if (is_not_null) {
          __ decode_heap_oop_not_null(dst);
        } else {
          __ decode_heap_oop(dst);
        }
      } else {
        __ ldr(dst, src);
      }
    } else {
      assert(in_native, "why else?");
      __ ldr(dst, src);
    }
    break;
  }
  case T_BOOLEAN: __ load_unsigned_byte (dst, src); break;
  case T_BYTE:    __ load_signed_byte   (dst, src); break;
  case T_CHAR:    __ load_unsigned_short(dst, src); break;
  case T_SHORT:   __ load_signed_short  (dst, src); break;
  case T_INT:     __ ldrw               (dst, src); break;
  case T_LONG:    __ ldr                (dst, src); break;
  case T_ADDRESS: __ ldr                (dst, src); break;
  case T_FLOAT:   __ ldrs               (v0, src); break;
  case T_DOUBLE:  __ ldrd               (v0, src); break;
  default: Unimplemented();
  }
}
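
// Illustrative sketch only, not part of the upstream file: a concrete
// collector's BarrierSetAssembler subclass would typically override load_at,
// delegate the raw memory access to this base version, and then emit its own
// post-load barrier. The class name below is a hypothetical placeholder:
//
//   void MyGCBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
//                                         BasicType type, Register dst, Address src,
//                                         Register tmp1, Register tmp_thread) {
//     BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
//     if (is_reference_type(type)) {
//       // ... post-load barrier code for the oop now in dst ...
//     }
//   }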

void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Address dst, Register val, Register tmp1, Register tmp2) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    val = val == noreg ? zr : val;
    if (in_heap) {
      if (UseCompressedOops) {
        assert(!dst.uses(val), "not enough registers");
        if (val != zr) {
          __ encode_heap_oop(val);
        }
        __ strw(val, dst);
      } else {
        __ str(val, dst);
      }
    } else {
      assert(in_native, "why else?");
      __ str(val, dst);
    }
    break;
  }
  case T_BOOLEAN:
    __ andw(val, val, 0x1);  // boolean is true if LSB is 1
    __ strb(val, dst);
    break;
  case T_BYTE:    __ strb(val, dst); break;
  case T_CHAR:    __ strh(val, dst); break;
  case T_SHORT:   __ strh(val, dst); break;
  case T_INT:     __ strw(val, dst); break;
  case T_LONG:    __ str (val, dst); break;
  case T_ADDRESS: __ str (val, dst); break;
  case T_FLOAT:   __ strs(v0, dst); break;
  case T_DOUBLE:  __ strd(v0, dst); break;
  default: Unimplemented();
  }
}

void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                        Register obj, Register tmp, Label& slowpath) {
  // If the mask changes, we need to ensure that the inverse is still encodable as an immediate.
  STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
  __ andr(obj, obj, ~JNIHandles::weak_tag_mask);
  __ ldr(obj, Address(obj, 0));  // *obj
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Register t2,
                                        Label& slow_case) {
  assert_different_registers(obj, t2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = t2;

  // verify_tlab();

  __ ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ lea(end, Address(obj, con_size_in_bytes));
  } else {
    __ lea(end, Address(obj, var_size_in_bytes));
  }
  __ ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
  __ cmp(end, rscratch1);
  __ br(Assembler::HI, slow_case);

  // update the tlab top pointer
  __ str(end, Address(rthread, JavaThread::tlab_top_offset()));

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
  // verify_tlab();
}
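
// Rough C-level sketch of the bump-pointer sequence tlab_allocate emits above
// (illustrative only; tlab_top/tlab_end stand in for the thread-local slots at
// JavaThread::tlab_top_offset()/tlab_end_offset()):
//
//   obj = thread->tlab_top;
//   end = obj + size;              // con_size_in_bytes or var_size_in_bytes
//   if (end > thread->tlab_end) {  // unsigned compare; br(Assembler::HI, slow_case)
//     goto slow_case;
//   }
//   thread->tlab_top = end;        // bump the top pointer; obj is the new object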

// Defines obj, preserves var_size_in_bytes
void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Label& slow_case) {
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    __ b(slow_case);
  } else {
    Register end = t1;
    Register heap_end = rscratch2;
    Label retry;
    __ bind(retry);
    {
      uint64_t offset;
      __ adrp(rscratch1, ExternalAddress((address) Universe::heap()->end_addr()), offset);
      __ ldr(heap_end, Address(rscratch1, offset));
    }

    ExternalAddress heap_top((address) Universe::heap()->top_addr());

    // Get the current top of the heap
    {
      uint64_t offset;
      __ adrp(rscratch1, heap_top, offset);
      // Use add() here after ADRP, rather than lea().
      // lea() does not generate anything if its offset is zero.
      // However, relocs expect to find either an ADD or a load/store
      // insn after an ADRP.  add() always generates an ADD insn, even
      // for add(Rn, Rn, 0).
      __ add(rscratch1, rscratch1, offset);
      __ ldaxr(obj, rscratch1);
    }

    // Adjust it by the size of our new object
    if (var_size_in_bytes == noreg) {
      __ lea(end, Address(obj, con_size_in_bytes));
    } else {
      __ lea(end, Address(obj, var_size_in_bytes));
    }

    // if end < obj then we wrapped around high memory
    __ cmp(end, obj);
    __ br(Assembler::LO, slow_case);

    __ cmp(end, heap_end);
    __ br(Assembler::HI, slow_case);

    // If heap_top hasn't been changed by some other thread, update it.
    __ stlxr(rscratch2, end, rscratch1);
    __ cbnzw(rscratch2, retry);

    incr_allocated_bytes(masm, var_size_in_bytes, con_size_in_bytes, t1);
  }
}

void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm,
                                               Register var_size_in_bytes,
                                               int con_size_in_bytes,
                                               Register t1) {
  assert(t1->is_valid(), "need temp reg");

  __ ldr(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
  if (var_size_in_bytes->is_valid()) {
    __ add(t1, t1, var_size_in_bytes);
  } else {
    __ add(t1, t1, con_size_in_bytes);
  }
  __ str(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
}

void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

  if (bs_nm == NULL) {
    return;
  }

  Label skip, guard;
  Address thread_disarmed_addr(rthread, in_bytes(bs_nm->thread_disarmed_offset()));

  __ ldrw(rscratch1, guard);

  // Subsequent loads of oops must occur after load of guard value.
  // BarrierSetNMethod::disarm sets guard with release semantics.
  __ membar(__ LoadLoad);
  __ ldrw(rscratch2, thread_disarmed_addr);
  __ cmpw(rscratch1, rscratch2);
  __ br(Assembler::EQ, skip);

  __ movptr(rscratch1, (uintptr_t) StubRoutines::aarch64::method_entry_barrier());
  __ blr(rscratch1);
  __ b(skip);

  __ bind(guard);

  __ emit_int32(0);  // nmethod guard value. Skipped over in common case.

  __ bind(skip);
}

void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs == NULL) {
    return;
  }

  Label bad_call;
  __ cbz(rmethod, bad_call);

  // Pointer chase to the method holder to find out if the method is concurrently unloading.
  Label method_live;
  __ load_method_holder_cld(rscratch1, rmethod);

  // Is it a strong CLD?
  __ ldr(rscratch2, Address(rscratch1, ClassLoaderData::keep_alive_offset()));
  __ cbnz(rscratch2, method_live);

  // Is it a weak but alive CLD?
  __ stp(r10, r11, Address(__ pre(sp, -2 * wordSize)));
  __ ldr(r10, Address(rscratch1, ClassLoaderData::holder_offset()));

  // Uses rscratch1 & rscratch2, so we must pass new temporaries.
  __ resolve_weak_handle(r10, r11);
  __ mov(rscratch1, r10);
  __ ldp(r10, r11, Address(__ post(sp, 2 * wordSize)));
  __ cbnz(rscratch1, method_live);

  __ bind(bad_call);

  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  __ bind(method_live);
}
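
// Rough pseudo-C of the class-unloading liveness check c2i_entry_barrier emits
// above (illustrative only; the field accesses approximate the
// keep_alive_offset()/holder_offset() loads):
//
//   if (method == NULL) goto bad_call;
//   ClassLoaderData* cld = method->method_holder()->class_loader_data();
//   if (cld->keep_alive != 0) goto method_live;       // strong CLD
//   oop holder = resolve_weak_handle(cld->holder);    // weak but maybe alive
//   if (holder != NULL) goto method_live;
//   bad_call: tail-call SharedRuntime::get_handle_wrong_method_stub()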