PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp
/*
 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/universe.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"

#define __ masm->

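// Plain (barrier-free) load of a value of the given BasicType from src into dst.
// Object and array loads from the heap decode compressed oops when UseCompressedOops
// is enabled; GC-specific BarrierSetAssembler subclasses override this to add their
// read barriers.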
void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register dst, Address src, Register tmp1, Register tmp_thread) {

  // LR is live. It must be saved around calls.

  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
      if (UseCompressedOops) {
        __ ldrw(dst, src);
        if (is_not_null) {
          __ decode_heap_oop_not_null(dst);
        } else {
          __ decode_heap_oop(dst);
        }
      } else {
        __ ldr(dst, src);
      }
    } else {
      assert(in_native, "why else?");
      __ ldr(dst, src);
    }
    break;
  }
  case T_BOOLEAN: __ load_unsigned_byte (dst, src); break;
  case T_BYTE:    __ load_signed_byte   (dst, src); break;
  case T_CHAR:    __ load_unsigned_short(dst, src); break;
  case T_SHORT:   __ load_signed_short  (dst, src); break;
  case T_INT:     __ ldrw               (dst, src); break;
  case T_LONG:    __ ldr                (dst, src); break;
  case T_ADDRESS: __ ldr                (dst, src); break;
  case T_FLOAT:   __ ldrs               (v0, src);  break;
  case T_DOUBLE:  __ ldrd               (v0, src);  break;
  default: Unimplemented();
  }
}

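// Plain (barrier-free) store of val to dst. Object and array stores into the heap
// encode compressed oops when UseCompressedOops is enabled; a null val (noreg) is
// stored as zr.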
void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Address dst, Register val, Register tmp1, Register tmp2) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    val = val == noreg ? zr : val;
    if (in_heap) {
      if (UseCompressedOops) {
        assert(!dst.uses(val), "not enough registers");
        if (val != zr) {
          __ encode_heap_oop(val);
        }
        __ strw(val, dst);
      } else {
        __ str(val, dst);
      }
    } else {
      assert(in_native, "why else?");
      __ str(val, dst);
    }
    break;
  }
  case T_BOOLEAN:
    __ andw(val, val, 0x1);  // boolean is true if LSB is 1
    __ strb(val, dst);
    break;
  case T_BYTE:    __ strb(val, dst); break;
  case T_CHAR:    __ strh(val, dst); break;
  case T_SHORT:   __ strh(val, dst); break;
  case T_INT:     __ strw(val, dst); break;
  case T_LONG:    __ str (val, dst); break;
  case T_ADDRESS: __ str (val, dst); break;
  case T_FLOAT:   __ strs(v0, dst);  break;
  case T_DOUBLE:  __ strd(v0, dst);  break;
  default: Unimplemented();
  }
}

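// Fast-path resolution of a jobject from native code: clear the weak tag bit and
// dereference the handle. This base implementation never branches to slowpath;
// GCs that need a read barrier here override it.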
void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                         Register obj, Register tmp, Label& slowpath) {
  // If mask changes we need to ensure that the inverse is still encodable as an immediate
  STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
  __ andr(obj, obj, ~JNIHandles::weak_tag_mask);
  __ ldr(obj, Address(obj, 0));  // *obj
}

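// TLAB allocation is a simple bump of tlab_top: compute the new top, branch to
// slow_case if it would pass tlab_end, otherwise store the new top back.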
// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Register t2,
                                        Label& slow_case) {
  assert_different_registers(obj, t2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = t2;

  // verify_tlab();

  __ ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ lea(end, Address(obj, con_size_in_bytes));
  } else {
    __ lea(end, Address(obj, var_size_in_bytes));
  }
  __ ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
  __ cmp(end, rscratch1);
  __ br(Assembler::HI, slow_case);

  // update the tlab top pointer
  __ str(end, Address(rthread, JavaThread::tlab_top_offset()));

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
  // verify_tlab();
}

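// Shared-eden allocation bumps the global heap top inside an LDAXR/STLXR retry
// loop; if the heap does not support inline contiguous allocation the code
// branches straight to slow_case.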
// Defines obj, preserves var_size_in_bytes
void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Label& slow_case) {
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    __ b(slow_case);
  } else {
    Register end = t1;
    Register heap_end = rscratch2;
    Label retry;
    __ bind(retry);
    {
      uint64_t offset;
      __ adrp(rscratch1, ExternalAddress((address) Universe::heap()->end_addr()), offset);
      __ ldr(heap_end, Address(rscratch1, offset));
    }

    ExternalAddress heap_top((address) Universe::heap()->top_addr());

    // Get the current top of the heap
    {
      uint64_t offset;
      __ adrp(rscratch1, heap_top, offset);
      // Use add() here after ADRP, rather than lea().
      // lea() does not generate anything if its offset is zero.
      // However, relocs expect to find either an ADD or a load/store
      // insn after an ADRP. add() always generates an ADD insn, even
      // for add(Rn, Rn, 0).
      __ add(rscratch1, rscratch1, offset);
      __ ldaxr(obj, rscratch1);
    }

    // Adjust it by the size of our new object
    if (var_size_in_bytes == noreg) {
      __ lea(end, Address(obj, con_size_in_bytes));
    } else {
      __ lea(end, Address(obj, var_size_in_bytes));
    }

    // if end < obj then we wrapped around high memory
    __ cmp(end, obj);
    __ br(Assembler::LO, slow_case);

    __ cmp(end, heap_end);
    __ br(Assembler::HI, slow_case);

    // If heap_top hasn't been changed by some other thread, update it.
    __ stlxr(rscratch2, end, rscratch1);
    __ cbnzw(rscratch2, retry);

    incr_allocated_bytes(masm, var_size_in_bytes, con_size_in_bytes, t1);
  }
}

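// Bump the per-thread allocated-bytes counter by either var_size_in_bytes
// (if valid) or the constant con_size_in_bytes.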
void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm,
                                               Register var_size_in_bytes,
                                               int con_size_in_bytes,
                                               Register t1) {
  assert(t1->is_valid(), "need temp reg");

  __ ldr(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
  if (var_size_in_bytes->is_valid()) {
    __ add(t1, t1, var_size_in_bytes);
  } else {
    __ add(t1, t1, con_size_in_bytes);
  }
  __ str(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
}

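// nmethod entry barrier: compare the nmethod-local guard value (emitted at the
// 'guard' label below) against the thread's disarmed value and, if they differ,
// call the method_entry_barrier stub.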
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

  if (bs_nm == NULL) {
    return;
  }

  Label skip, guard;
  Address thread_disarmed_addr(rthread, in_bytes(bs_nm->thread_disarmed_offset()));

  __ ldrw(rscratch1, guard);

  // Subsequent loads of oops must occur after load of guard value.
  // BarrierSetNMethod::disarm sets guard with release semantics.
  __ membar(__ LoadLoad);
  __ ldrw(rscratch2, thread_disarmed_addr);
  __ cmpw(rscratch1, rscratch2);
  __ br(Assembler::EQ, skip);

  __ movptr(rscratch1, (uintptr_t) StubRoutines::aarch64::method_entry_barrier());
  __ blr(rscratch1);
  __ b(skip);

  __ bind(guard);

  __ emit_int32(0);  // nmethod guard value. Skipped over in common case.

  __ bind(skip);
}

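// c2i entry barrier: verify that the callee's class loader data is still alive
// (a strong CLD, or a weak CLD whose holder is reachable) before entering the
// method; otherwise jump to the handle_wrong_method stub.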
void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs == NULL) {
    return;
  }

  Label bad_call;
  __ cbz(rmethod, bad_call);

  // Pointer chase to the method holder to find out if the method is concurrently unloading.
  Label method_live;
  __ load_method_holder_cld(rscratch1, rmethod);

  // Is it a strong CLD?
  __ ldr(rscratch2, Address(rscratch1, ClassLoaderData::keep_alive_offset()));
  __ cbnz(rscratch2, method_live);

  // Is it a weak but alive CLD?
  __ stp(r10, r11, Address(__ pre(sp, -2 * wordSize)));
  __ ldr(r10, Address(rscratch1, ClassLoaderData::holder_offset()));

  // Uses rscratch1 & rscratch2, so we must pass new temporaries.
  __ resolve_weak_handle(r10, r11);
  __ mov(rscratch1, r10);
  __ ldp(r10, r11, Address(__ post(sp, 2 * wordSize)));
  __ cbnz(rscratch1, method_live);

  __ bind(bad_call);

  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  __ bind(method_live);
}