GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp
/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif

#define __ masm->

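// `__` is the usual HotSpot shorthand: each `__ op(...)` below expands to
// `masm->op(...)` and emits one instruction (or pseudo-op) into the code
// buffer at stub-generation time.

// SATB (snapshot-at-the-beginning) pre-barrier for bulk oop-array stores:
// if concurrent marking is active, every reference about to be overwritten
// is reported to the runtime so the marker still sees the heap snapshot it
// started from. The barrier is skipped when the destination is known to
// contain no previous oops (IS_DEST_UNINITIALIZED).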
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                            Register addr, Register count) {
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  if (!dest_uninitialized) {
    Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
#ifndef _LP64
    __ push(thread);
    __ get_thread(thread);
#endif

    Label filtered;
    Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
    // Is marking active?
    if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
      __ cmpl(in_progress, 0);
    } else {
      assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
      __ cmpb(in_progress, 0);
    }

    NOT_LP64(__ pop(thread);)

    __ jcc(Assembler::equal, filtered);

    __ pusha();                      // push registers
#ifdef _LP64
    if (count == c_rarg0) {
      if (addr == c_rarg1) {
        // exactly backwards!!
        __ xchgptr(c_rarg1, c_rarg0);
      } else {
        __ movptr(c_rarg1, count);
        __ movptr(c_rarg0, addr);
      }
    } else {
      __ movptr(c_rarg0, addr);
      __ movptr(c_rarg1, count);
    }
    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), 2);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), 2);
    }
#else
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry),
                    addr, count);
#endif
    __ popa();

    __ bind(filtered);
  }
}

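// The matching post-barrier for bulk oop-array stores: the destination
// range is handed to the runtime, which dirties and enqueues the card
// table entries it covers for remembered-set refinement.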
void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                             Register addr, Register count, Register tmp) {
  __ pusha();             // push registers (overkill)
#ifdef _LP64
  if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
    assert_different_registers(c_rarg1, addr);
    __ mov(c_rarg1, count);
    __ mov(c_rarg0, addr);
  } else {
    assert_different_registers(c_rarg0, count);
    __ mov(c_rarg0, addr);
    __ mov(c_rarg1, count);
  }
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry), 2);
#else
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry),
                  addr, count);
#endif
  __ popa();
}

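// Loads of Reference.referent (ON_WEAK_OOP_REF / ON_PHANTOM_OOP_REF) get
// special treatment: the loaded referent may be the only reference the
// concurrent marker would otherwise miss, so it is routed through the SATB
// pre-barrier as pre_val.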
void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                    Register dst, Address src, Register tmp1, Register tmp_thread) {
  bool on_oop = is_reference_type(type);
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;
  ModRefBarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
  if (on_oop && on_reference) {
    const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
    NOT_LP64(__ get_thread(thread));

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    g1_write_barrier_pre(masm /* masm */,
                         noreg /* obj */,
                         dst /* pre_val */,
                         thread /* thread */,
                         tmp1 /* tmp */,
                         true /* tosca_live */,
                         true /* expand_call */);
  }
}

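// SATB pre-barrier for a single reference field. Fast path: nothing is
// logged when marking is inactive or the previous value is null; otherwise
// the old value goes into the thread-local SATB buffer, and only a full
// buffer (index == 0) falls through to the runtime call.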
void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
                                                 Register obj,
                                                 Register pre_val,
                                                 Register thread,
                                                 Register tmp,
                                                 bool tosca_live,
                                                 bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

#ifdef _LP64
  assert(thread == r15_thread, "must be");
#endif // _LP64

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg) {
    assert_different_registers(obj, pre_val, tmp);
    assert(pre_val != rax, "check this code");
  }

  Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
  Address index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ cmpl(in_progress, 0);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ cmpb(in_progress, 0);
  }
  __ jcc(Assembler::equal, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cmpptr(pre_val, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, done);

  // Can we store the original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ movptr(tmp, index);                // tmp := *index_adr
  __ cmpptr(tmp, 0);                    // tmp == 0?
  __ jcc(Assembler::equal, runtime);    // If yes, goto runtime

  __ subptr(tmp, wordSize);             // tmp := tmp - wordSize
  __ movptr(index, tmp);                // *index_adr := tmp
  __ addptr(tmp, buffer);               // tmp := tmp + *buffer_adr

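  // (The SATB buffer fills downward: index holds the bytes still free, so
  // the decremented index doubles as the offset of the new slot, and tmp
  // now holds buffer + index, the address to write to.)
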
  // Record the previous value
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);
  // save the live input values
  if (tosca_live) __ push(rax);

  if (obj != noreg && obj != rax)
    __ push(obj);

  if (pre_val != rax)
    __ push(pre_val);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that *(ebp+frame::interpreter_frame_last_sp) == NULL.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  NOT_LP64( __ push(thread); )

  if (expand_call) {
    LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
#ifdef _LP64
    if (c_rarg1 != thread) {
      __ mov(c_rarg1, thread);
    }
    if (c_rarg0 != pre_val) {
      __ mov(c_rarg0, pre_val);
    }
#else
    __ push(thread);
    __ push(pre_val);
#endif
    __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), 2);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  NOT_LP64( __ pop(thread); )

  // restore the live input values
  if (pre_val != rax)
    __ pop(pre_val);

  if (obj != noreg && obj != rax)
    __ pop(obj);

  if (tosca_live) __ pop(rax);

  __ bind(done);
}

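// Card-table post-barrier for a single reference store. Stores that stay
// within one heap region, store null, or hit an already-young or
// already-dirty card are filtered out; otherwise the card is dirtied and
// its address is enqueued on the thread-local dirty-card queue for
// concurrent refinement.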
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
                                                  Register store_addr,
                                                  Register new_val,
                                                  Register thread,
                                                  Register tmp,
                                                  Register tmp2) {
  // Generated code assumes that buffer index is pointer sized.
  STATIC_ASSERT(in_bytes(SATBMarkQueue::byte_width_of_index()) == sizeof(intptr_t));
#ifdef _LP64
  assert(thread == r15_thread, "must be");
#endif // _LP64

  Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));

  CardTableBarrierSet* ct =
    barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());

  Label done;
  Label runtime;

  // Does store cross heap regions?

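  // (Two addresses in the same region agree in every bit above
  // HeapRegion::LogOfHRGrainBytes, so xor-then-shift leaves zero exactly
  // when store_addr and new_val share a region.)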
  __ movptr(tmp, store_addr);
  __ xorptr(tmp, new_val);
  __ shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
  __ jcc(Assembler::equal, done);

  // crosses regions, storing NULL?

  __ cmpptr(new_val, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, done);

  // storing region crossing non-NULL, is card already dirty?

  const Register card_addr = tmp;
  const Register cardtable = tmp2;

  __ movptr(card_addr, store_addr);
  __ shrptr(card_addr, CardTable::card_shift);
  // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
  // a valid address and therefore is not properly handled by the relocation code.
  __ movptr(cardtable, (intptr_t)ct->card_table()->byte_map_base());
  __ addptr(card_addr, cardtable);

  __ cmpb(Address(card_addr, 0), (int)G1CardTable::g1_young_card_val());
  __ jcc(Assembler::equal, done);

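  // StoreLoad fence: the reference store that preceded this barrier must be
  // globally visible before the card is re-read, so a concurrent refinement
  // thread cleaning the card cannot miss the update.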
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
  __ cmpb(Address(card_addr, 0), (int)G1CardTable::dirty_card_val());
  __ jcc(Assembler::equal, done);

  // storing a region crossing, non-NULL oop, card is clean.
  // dirty card and log.

  __ movb(Address(card_addr, 0), (int)G1CardTable::dirty_card_val());

  __ movptr(tmp2, queue_index);
  __ testptr(tmp2, tmp2);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp2, wordSize);
  __ movptr(queue_index, tmp2);
  __ addptr(tmp2, buffer);
  __ movptr(Address(tmp2, 0), card_addr);
  __ jmp(done);

  __ bind(runtime);
  // save the live input values
  __ push(store_addr);
#ifdef _LP64
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, r15_thread);
#else
  __ push(thread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
  __ pop(thread);
#endif
  __ pop(store_addr);

  __ bind(done);
}

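// oop_store_at combines both halves for a single field store: pre-barrier
// on the value being overwritten, the store itself, then the post-barrier
// on the new value (kept uncompressed, since the region-crossing check
// needs a full address).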
void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         Address dst, Register val, Register tmp1, Register tmp2) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool as_normal = (decorators & AS_NORMAL) != 0;
  assert((decorators & IS_DEST_UNINITIALIZED) == 0, "unsupported");

  bool needs_pre_barrier = as_normal;
  bool needs_post_barrier = val != noreg && in_heap;

  Register tmp3 = LP64_ONLY(r8) NOT_LP64(rsi);
  Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  // flatten object address if needed
  // We do it regardless of precise because we need the registers
  if (dst.index() == noreg && dst.disp() == 0) {
    if (dst.base() != tmp1) {
      __ movptr(tmp1, dst.base());
    }
  } else {
    __ lea(tmp1, dst);
  }

#ifndef _LP64
  InterpreterMacroAssembler *imasm = static_cast<InterpreterMacroAssembler*>(masm);
#endif

  NOT_LP64(__ get_thread(rcx));
  NOT_LP64(imasm->save_bcp());

  if (needs_pre_barrier) {
    g1_write_barrier_pre(masm /* masm */,
                         tmp1 /* obj */,
                         tmp2 /* pre_val */,
                         rthread /* thread */,
                         tmp3 /* tmp */,
                         val != noreg /* tosca_live */,
                         false /* expand_call */);
  }
  if (val == noreg) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg);
  } else {
    Register new_val = val;
    if (needs_post_barrier) {
      // G1 barrier needs uncompressed oop for region cross check.
      if (UseCompressedOops) {
        new_val = tmp2;
        __ movptr(new_val, val);
      }
    }
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg);
    if (needs_post_barrier) {
      g1_write_barrier_post(masm /* masm */,
                            tmp1 /* store_addr */,
                            new_val /* new_val */,
                            rthread /* thread */,
                            tmp3 /* tmp */,
                            tmp2 /* tmp2 */);
    }
  }
  NOT_LP64(imasm->restore_bcp());
}

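// C1 (client compiler) support. The inline fast-path checks are emitted
// from G1BarrierSetC1; the code below provides the out-of-line pieces:
// per-call-site slow-path stubs plus two shared runtime code blobs that
// those stubs call into.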
#ifdef COMPILER1

#undef __
#define __ ce->masm()->

void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
  }

  __ cmpptr(pre_val_reg, (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ jmp(*stub->continuation());
}

void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());
  assert(stub->addr()->is_register(), "Precondition.");
  assert(stub->new_val()->is_register(), "Precondition.");
  Register new_val_reg = stub->new_val()->as_register();
  __ cmpptr(new_val_reg, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, *stub->continuation());
  ce->store_parameter(stub->addr()->as_pointer_register(), 0);
  __ call(RuntimeAddress(bs->post_barrier_c1_runtime_code_blob()->code_begin()));
  __ jmp(*stub->continuation());
}

#undef __

#define __ sasm->

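// Shared runtime blob for the C1 pre-barrier slow path. It re-checks that
// marking is still active, then enqueues the previous value (passed as
// arg0) just as the interpreter barrier above does.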
void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  // Generated code assumes that buffer index is pointer sized.
  STATIC_ASSERT(in_bytes(SATBMarkQueue::byte_width_of_index()) == sizeof(intptr_t));

  __ prologue("g1_pre_barrier", false);
  // arg0 : previous value of memory

  __ push(rax);
  __ push(rdx);

  const Register pre_val = rax;
  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  const Register tmp = rdx;

  NOT_LP64(__ get_thread(thread);)

  Address queue_active(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
  Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ cmpl(queue_active, 0);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ cmpb(queue_active, 0);
  }
  __ jcc(Assembler::equal, done);

  // Can we store the original value in the thread's buffer?

  __ movptr(tmp, queue_index);
  __ testptr(tmp, tmp);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp, wordSize);
  __ movptr(queue_index, tmp);
  __ addptr(tmp, buffer);

  // prev_val (rax)
  __ load_parameter(0, pre_val);
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);

  __ save_live_registers_no_oop_map(true);

  // load the pre-value
  __ load_parameter(0, rcx);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), rcx, thread);

  __ restore_live_registers(true);

  __ bind(done);

  __ pop(rdx);
  __ pop(rax);

  __ epilogue();
}

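// Shared runtime blob for the C1 post-barrier slow path: it recomputes the
// card address from the store address in arg0, applies the same young-card
// and dirty-card filters as g1_write_barrier_post, then dirties and
// enqueues the card.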
void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("g1_post_barrier", false);

  // arg0: store_address
  Address store_addr(rbp, 2*BytesPerWord);

  CardTableBarrierSet* ct =
    barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());

  Label done;
  Label enqueued;
  Label runtime;

  // At this point we know new_value is non-NULL and the new_value crosses regions.
  // Must check to see if card is already dirty

  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);

  Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));

  __ push(rax);
  __ push(rcx);

  const Register cardtable = rax;
  const Register card_addr = rcx;

  __ load_parameter(0, card_addr);
  __ shrptr(card_addr, CardTable::card_shift);
  // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
  // a valid address and therefore is not properly handled by the relocation code.
  __ movptr(cardtable, (intptr_t)ct->card_table()->byte_map_base());
  __ addptr(card_addr, cardtable);

  NOT_LP64(__ get_thread(thread);)

  __ cmpb(Address(card_addr, 0), (int)G1CardTable::g1_young_card_val());
  __ jcc(Assembler::equal, done);

  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
  __ cmpb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
  __ jcc(Assembler::equal, done);

  // storing region crossing non-NULL, card is clean.
  // dirty card and log.

  __ movb(Address(card_addr, 0), (int)CardTable::dirty_card_val());

  const Register tmp = rdx;
  __ push(rdx);

  __ movptr(tmp, queue_index);
  __ testptr(tmp, tmp);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp, wordSize);
  __ movptr(queue_index, tmp);
  __ addptr(tmp, buffer);
  __ movptr(Address(tmp, 0), card_addr);
  __ jmp(enqueued);

  __ bind(runtime);

  __ save_live_registers_no_oop_map(true);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);

  __ restore_live_registers(true);

  __ bind(enqueued);
  __ pop(rdx);

  __ bind(done);
  __ pop(rcx);
  __ pop(rax);

  __ epilogue();
}

#undef __

#endif // COMPILER1