GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp
/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2019 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "registerSaver_s390.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif

#define __ masm->

#define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str)

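// SATB pre-barrier for bulk reference-array writes (e.g. arraycopy).
// Unless the destination is statically known to be uninitialized, the code
// tests the per-thread "SATB marking active" flag and, while concurrent
// marking is in progress, calls into the runtime so that the references
// about to be overwritten are logged for the marking snapshot.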
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                            Register addr, Register count) {
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  // With G1, don't generate the call if we statically know that the target is uninitialized.
  if (!dest_uninitialized) {
    // Is marking active?
    Label filtered;
    assert_different_registers(addr,  Z_R0_scratch);  // would be destroyed by push_frame()
    assert_different_registers(count, Z_R0_scratch);  // would be destroyed by push_frame()
    Register Rtmp1 = Z_R0_scratch;
    const int active_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
    if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
      __ load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
    } else {
      guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
      __ load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
    }
    __ z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.

    RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers); // Creates frame.

    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), addr, count);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), addr, count);
    }

    RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers);

    __ bind(filtered);
  }
}

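// Post-barrier for bulk reference-array writes: the runtime call dirties the
// cards covering the written range. With do_return == true the call is
// emitted as a tail call, so the runtime routine returns directly to the
// stub's caller.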
void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                             Register addr, Register count, bool do_return) {
  address entry_point = CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry);
  if (!do_return) {
    assert_different_registers(addr,  Z_R0_scratch);  // would be destroyed by push_frame()
    assert_different_registers(count, Z_R0_scratch);  // would be destroyed by push_frame()
    RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers); // Creates frame.
    __ call_VM_leaf(entry_point, addr, count);
    RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers);
  } else {
    // Tail call: branch to the C entry point and return to the stub caller from there.
    __ lgr_if_needed(Z_ARG1, addr);
    __ lgr_if_needed(Z_ARG2, count);
    __ load_const(Z_R1, entry_point);
    __ z_br(Z_R1); // Branch without linking; the callee will return to the stub caller.
  }
}

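// Reference load. For loads decorated as ON_WEAK_OOP_REF or ON_PHANTOM_OOP_REF
// (as used, for instance, for Reference.get()), the loaded referent is
// additionally fed through the SATB pre-barrier so that concurrent marking
// does not miss it.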
void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                    const Address& src, Register dst, Register tmp1, Register tmp2, Label *L_handle_null) {
  bool on_oop = is_reference_type(type);
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;
  Label done;
  if (on_oop && on_reference && L_handle_null == NULL) { L_handle_null = &done; }
  ModRefBarrierSetAssembler::load_at(masm, decorators, type, src, dst, tmp1, tmp2, L_handle_null);
  if (on_oop && on_reference) {
    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    g1_write_barrier_pre(masm, decorators | IS_NOT_NULL,
                         NULL /* obj */,
                         dst  /* pre_val */,
                         noreg /* preserve */,
                         tmp1, tmp2 /* tmp */,
                         true /* pre_val_needed */);
  }
  __ bind(done);
}

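// G1 SATB pre-barrier. While concurrent marking is active, the previous value
// of the updated field (either loaded from *obj here or preloaded by the
// caller into Rpre_val) is enqueued into the thread-local SATB buffer. NULL
// previous values are filtered out. If the buffer is full (index == 0), the
// slow path calls G1BarrierSetRuntime::write_ref_field_pre_entry.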
void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, DecoratorSet decorators,
                                                 const Address* obj,
                                                 Register Rpre_val,      // Ideally, this is a non-volatile register.
                                                 Register Rval,          // Will be preserved.
                                                 Register Rtmp1,         // If Rpre_val is volatile, either Rtmp1
                                                 Register Rtmp2,         // or Rtmp2 has to be non-volatile.
                                                 bool pre_val_needed     // Save Rpre_val across runtime call, caller uses it.
                                                 ) {

  bool not_null  = (decorators & IS_NOT_NULL) != 0,
       preloaded = obj == NULL;

  const Register Robj = obj ? obj->base() : noreg,
                 Roff = obj ? obj->index() : noreg;
  const int active_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  const int buffer_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
  const int index_offset  = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  assert_different_registers(Rtmp1, Rtmp2, Z_R0_scratch); // None of the Rtmp<i> must be Z_R0!!
  assert_different_registers(Robj, Z_R0_scratch);         // Used for addressing. Furthermore, push_frame destroys Z_R0!!
  assert_different_registers(Rval, Z_R0_scratch);         // push_frame destroys Z_R0!!

  Label callRuntime, filtered;

  BLOCK_COMMENT("g1_write_barrier_pre {");

  // Is marking active?
  // Note: value is loaded for test purposes only. No further use here.
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
  }
  __ z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.

  assert(Rpre_val != noreg, "must have a real register");


  // If an object is given, we need to load the previous value into Rpre_val.
  if (obj) {
    // Load the previous value...
    if (UseCompressedOops) {
      __ z_llgf(Rpre_val, *obj);
    } else {
      __ z_lg(Rpre_val, *obj);
    }
  }

  // Is the previous value NULL?
  // If so, we don't need to record it and we're done.
  // Note: pre_val is loaded, decompressed and stored (directly or via runtime call).
  //       Register contents are preserved across the runtime call if the caller requests that.
  if (preloaded && not_null) {
#ifdef ASSERT
    __ z_ltgr(Rpre_val, Rpre_val);
    __ asm_assert_ne("null oop not allowed (G1 pre)", 0x321); // Checked by caller.
#endif
  } else {
    __ z_ltgr(Rpre_val, Rpre_val);
    __ z_bre(filtered); // previous value is NULL, so we don't need to record it.
  }

  // Decode the oop now. We know it's not NULL.
  if (Robj != noreg && UseCompressedOops) {
    __ oop_decoder(Rpre_val, Rpre_val, /*maybeNULL=*/false);
  }

  // OK, it's not filtered, so we'll need to call enqueue.

  // We can store the original value in the thread's buffer
  // only if index > 0. Otherwise, we need the runtime to handle it.
  // (The index field is typed as size_t.)
  Register Rbuffer = Rtmp1, Rindex = Rtmp2;
  assert_different_registers(Rbuffer, Rindex, Rpre_val);

  __ z_lg(Rbuffer, buffer_offset, Z_thread);

  __ load_and_test_long(Rindex, Address(Z_thread, index_offset));
  __ z_bre(callRuntime); // If index == 0, goto runtime.

  __ add2reg(Rindex, -wordSize); // Decrement index.
  __ z_stg(Rindex, index_offset, Z_thread);

  // Record the previous value.
  __ z_stg(Rpre_val, 0, Rbuffer, Rindex);
  __ z_bru(filtered); // We are done.

  Rbuffer = noreg; // end of life
  Rindex  = noreg; // end of life

  __ bind(callRuntime);

  // Save some registers (inputs and result) over runtime call
  // by spilling them into the top frame.
  if (Robj != noreg && Robj->is_volatile()) {
    __ z_stg(Robj, Robj->encoding()*BytesPerWord, Z_SP);
  }
  if (Roff != noreg && Roff->is_volatile()) {
    __ z_stg(Roff, Roff->encoding()*BytesPerWord, Z_SP);
  }
  if (Rval != noreg && Rval->is_volatile()) {
    __ z_stg(Rval, Rval->encoding()*BytesPerWord, Z_SP);
  }

  // Save Rpre_val (result) over runtime call.
  Register Rpre_save = Rpre_val;
  if ((Rpre_val == Z_R0_scratch) || (pre_val_needed && Rpre_val->is_volatile())) {
    guarantee(!Rtmp1->is_volatile() || !Rtmp2->is_volatile(), "oops!");
    Rpre_save = !Rtmp1->is_volatile() ? Rtmp1 : Rtmp2;
  }
  __ lgr_if_needed(Rpre_save, Rpre_val);

  // Push frame to protect top frame with return pc and spilled register values.
  __ save_return_pc();
  __ push_frame_abi160(0); // Will use Z_R0 as tmp.

  // Rpre_val may be destroyed by push_frame().
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), Rpre_save, Z_thread);

  __ pop_frame();
  __ restore_return_pc();

  // Restore spilled values.
  if (Robj != noreg && Robj->is_volatile()) {
    __ z_lg(Robj, Robj->encoding()*BytesPerWord, Z_SP);
  }
  if (Roff != noreg && Roff->is_volatile()) {
    __ z_lg(Roff, Roff->encoding()*BytesPerWord, Z_SP);
  }
  if (Rval != noreg && Rval->is_volatile()) {
    __ z_lg(Rval, Rval->encoding()*BytesPerWord, Z_SP);
  }
  if (pre_val_needed && Rpre_val->is_volatile()) {
    __ lgr_if_needed(Rpre_val, Rpre_save);
  }

  __ bind(filtered);
  BLOCK_COMMENT("} g1_write_barrier_pre");
}

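// G1 post-barrier (card marking). Stores that stay within one heap region,
// store NULL, hit a young-gen card, or hit an already dirty card are filtered
// out. Otherwise the card is dirtied and its address is enqueued into the
// thread-local dirty card queue; when the queue is full, the slow path calls
// G1BarrierSetRuntime::write_ref_field_post_entry.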
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, DecoratorSet decorators, Register Rstore_addr, Register Rnew_val,
                                                  Register Rtmp1, Register Rtmp2, Register Rtmp3) {
  bool not_null = (decorators & IS_NOT_NULL) != 0;

  assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2); // Most probably, Rnew_val == Rtmp3.

  Label callRuntime, filtered;

  CardTableBarrierSet* ct = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());

  BLOCK_COMMENT("g1_write_barrier_post {");

  // Does the store cross heap regions?
  // It does if the two addresses specify different grain addresses.
  if (VM_Version::has_DistinctOpnds()) {
    __ z_xgrk(Rtmp1, Rstore_addr, Rnew_val);
  } else {
    __ z_lgr(Rtmp1, Rstore_addr);
    __ z_xgr(Rtmp1, Rnew_val);
  }
  __ z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
  __ z_bre(filtered);

  // Crosses regions, storing NULL?
  if (not_null) {
#ifdef ASSERT
    __ z_ltgr(Rnew_val, Rnew_val);
    __ asm_assert_ne("null oop not allowed (G1 post)", 0x322); // Checked by caller.
#endif
  } else {
    __ z_ltgr(Rnew_val, Rnew_val);
    __ z_bre(filtered);
  }

  Rnew_val = noreg; // end of lifetime

  // Storing region crossing non-NULL, is card already dirty?
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3);
  // Make sure not to use Z_R0 for any of these registers.
  Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3;
  Register Rbase      = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp3;

  // Calculate the address of the card.
  __ load_const_optimized(Rbase, (address)ct->card_table()->byte_map_base()); // Card table base.
  __ z_srlg(Rcard_addr, Rstore_addr, CardTable::card_shift);                  // Index into card table.
  __ z_algr(Rcard_addr, Rbase);                                               // Explicit calculation needed for cli.
  Rbase = noreg; // end of lifetime

  // Filter young.
  __ z_cli(0, Rcard_addr, G1CardTable::g1_young_card_val());
  __ z_bre(filtered);

  // Check the card value. If dirty, we're done.
  // This also avoids false sharing of the (already dirty) card.
  __ z_sync(); // Required to support concurrent cleaning.
  __ z_cli(0, Rcard_addr, G1CardTable::dirty_card_val()); // Reload after membar.
  __ z_bre(filtered);

  // Storing a region crossing, non-NULL oop, card is clean.
  // Dirty card and log.
  __ z_mvi(0, Rcard_addr, G1CardTable::dirty_card_val());

  Register Rcard_addr_x = Rcard_addr;
  Register Rqueue_index = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp1;
  Register Rqueue_buf   = (Rtmp3 != Z_R0_scratch) ? Rtmp3 : Rtmp1;
  const int qidx_off = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
  const int qbuf_off = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
  if ((Rcard_addr == Rqueue_buf) || (Rcard_addr == Rqueue_index)) {
    Rcard_addr_x = Z_R0_scratch; // Register shortage. We have to use Z_R0.
  }
  __ lgr_if_needed(Rcard_addr_x, Rcard_addr);

  __ load_and_test_long(Rqueue_index, Address(Z_thread, qidx_off));
  __ z_bre(callRuntime); // If index == 0, branch to the runtime call.

  __ z_lg(Rqueue_buf, qbuf_off, Z_thread);

  __ add2reg(Rqueue_index, -wordSize); // Decrement index.
  __ z_stg(Rqueue_index, qidx_off, Z_thread);

  __ z_stg(Rcard_addr_x, 0, Rqueue_index, Rqueue_buf); // Store card.
  __ z_bru(filtered);

  __ bind(callRuntime);

  // TODO: do we need a frame? Introduced to be on the safe side.
  bool needs_frame = true;
  __ lgr_if_needed(Rcard_addr, Rcard_addr_x); // Copy back ASAP. push_frame will destroy Z_R0_scratch!

  // The VM call needs a frame to access (write) the O register.
  if (needs_frame) {
    __ save_return_pc();
    __ push_frame_abi160(0); // Will use Z_R0 as tmp on old CPUs.
  }

  // Save the live input values.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), Rcard_addr, Z_thread);

  if (needs_frame) {
    __ pop_frame();
    __ restore_return_pc();
  }

  __ bind(filtered);

  BLOCK_COMMENT("} g1_write_barrier_post");
}

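// Full oop store: pre-barrier (log the old value), the store itself, and the
// post-barrier. For precise barriers (array or unknown-oop stores) the exact
// element address is computed for the post-barrier; the post-barrier is
// skipped entirely when NULL is stored.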
void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         const Address& dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool is_array = (decorators & IS_ARRAY) != 0;
  bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool precise = is_array || on_anonymous;
  // Load and record the previous value.
  g1_write_barrier_pre(masm, decorators, &dst, tmp3, val, tmp1, tmp2, false);

  BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);

  // No need for post barrier if storing NULL
  if (val != noreg) {
    const Register base = dst.base(),
                   idx  = dst.index();
    const intptr_t disp = dst.disp();
    if (precise && (disp != 0 || idx != noreg)) {
      __ add2reg_with_index(base, disp, idx, base);
    }
    g1_write_barrier_post(masm, decorators, base, val, tmp1, tmp2, tmp3);
  }
}

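// Resolve a jobject. NULL handles are returned as-is. For jweak-tagged
// handles, the resolved oop is passed through the pre-barrier with
// ON_PHANTOM_OOP_REF decorators so it is not missed by concurrent marking.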
void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value, Register tmp1, Register tmp2) {
  NearLabel Ldone, Lnot_weak;
  __ z_ltgr(tmp1, value);
  __ z_bre(Ldone);          // Use NULL result as-is.

  __ z_nill(value, ~JNIHandles::weak_tag_mask);
  __ z_lg(value, 0, value); // Resolve (untagged) jobject.

  __ z_tmll(tmp1, JNIHandles::weak_tag_mask); // Test for jweak tag.
  __ z_braz(Lnot_weak);
  __ verify_oop(value, FILE_AND_LINE);
  DecoratorSet decorators = IN_NATIVE | ON_PHANTOM_OOP_REF;
  g1_write_barrier_pre(masm, decorators, (const Address*)NULL, value, noreg, tmp1, tmp2, true);
  __ bind(Lnot_weak);
  __ verify_oop(value, FILE_AND_LINE);
  __ bind(Ldone);
}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

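// C1 slow-path stub for the pre-barrier: load pre_val if requested, filter
// NULL, and call the shared pre-barrier runtime blob with the previous value
// in Z_R1_scratch.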
void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
  __ bind(*stub->entry());
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
  }

  __ z_ltgr(Z_R1_scratch, pre_val_reg); // Pass oop in Z_R1_scratch to Runtime1::g1_pre_barrier_slow_id.
  __ branch_optimized(Assembler::bcondZero, *stub->continuation());
  ce->emit_call_c(bs->pre_barrier_c1_runtime_code_blob()->code_begin());
  __ branch_optimized(Assembler::bcondAlways, *stub->continuation());
}

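// C1 slow-path stub for the post-barrier: filter NULL new values and call the
// shared post-barrier runtime blob with the store address in Z_R1_scratch.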
void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
  assert(stub->addr()->is_register(), "Precondition.");
  assert(stub->new_val()->is_register(), "Precondition.");
  Register new_val_reg = stub->new_val()->as_register();
  __ z_ltgr(new_val_reg, new_val_reg);
  __ branch_optimized(Assembler::bcondZero, *stub->continuation());
  __ z_lgr(Z_R1_scratch, stub->addr()->as_pointer_register());
  ce->emit_call_c(bs->post_barrier_c1_runtime_code_blob()->code_begin());
  __ branch_optimized(Assembler::bcondAlways, *stub->continuation());
}

#undef __

#define __ sasm->

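// Helpers for the runtime stubs below: spill and reload all volatile
// registers around the VM leaf calls that refill the SATB and dirty card
// queues.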
static OopMap* save_volatile_registers(StubAssembler* sasm, Register return_pc = Z_R14) {
  __ block_comment("save_volatile_registers");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
  int frame_size_in_slots = RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
}

static void restore_volatile_registers(StubAssembler* sasm) {
  __ block_comment("restore_volatile_registers");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
  RegisterSaver::restore_live_registers(sasm, reg_set);
}

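// Out-of-line runtime code behind the C1 pre-barrier stub. Enqueues the
// previous value (passed in Z_R1_scratch) into the SATB buffer; when the
// buffer is full, G1SATBMarkQueueSet::handle_zero_index_for_thread is called
// to make room in the queue, and the enqueue is then retried.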
void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  // Z_R1_scratch: previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();
  __ set_info("g1_pre_barrier_slow_id", false);

  Register pre_val = Z_R1_scratch;
  Register tmp  = Z_R6; // Must be non-volatile because it is used to save pre_val.
  Register tmp2 = Z_R7;

  Label refill, restart, marking_not_active;
  int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  int satb_q_index_byte_offset  = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  int satb_q_buf_byte_offset    = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());

  // Save tmp registers (see assertion in G1PreBarrierStub::emit_code()).
  __ z_stg(tmp,  0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_stg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

  // Is marking still active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ load_and_test_int(tmp, Address(Z_thread, satb_q_active_byte_offset));
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ load_and_test_byte(tmp, Address(Z_thread, satb_q_active_byte_offset));
  }
  __ z_bre(marking_not_active); // Activity indicator is zero, so there is no marking going on currently.

  __ bind(restart);
  // Load the index into the SATB buffer. SATBMarkQueue::_index is a
  // size_t so ld_ptr is appropriate.
  __ z_ltg(tmp, satb_q_index_byte_offset, Z_R0, Z_thread);

  // index == 0?
  __ z_brz(refill);

  __ z_lg(tmp2, satb_q_buf_byte_offset, Z_thread);
  __ add2reg(tmp, -oopSize);

  __ z_stg(pre_val, 0, tmp, tmp2); // [_buf + index] := <previous value>
  __ z_stg(tmp, satb_q_index_byte_offset, Z_thread);

  __ bind(marking_not_active);
  // Restore tmp registers (see assertion in G1PreBarrierStub::emit_code()).
  __ z_lg(tmp,  0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_lg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_br(Z_R14);

  __ bind(refill);
  save_volatile_registers(sasm);
  __ z_lgr(tmp, pre_val); // save pre_val
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1SATBMarkQueueSet::handle_zero_index_for_thread),
                  Z_thread);
  __ z_lgr(pre_val, tmp); // restore pre_val
  restore_volatile_registers(sasm);
  __ z_bru(restart);
}

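// Out-of-line runtime code behind the C1 post-barrier stub. Computes the card
// for the updated slot (address in Z_R1_scratch), filters young and already
// dirty cards, dirties the card, and enqueues its address into the dirty card
// queue; when the queue is full, G1DirtyCardQueueSet::handle_zero_index_for_thread
// is called to make room, and the enqueue is then retried.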
void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
  // Z_R1_scratch: oop address, address of updated memory slot

  BarrierSet* bs = BarrierSet::barrier_set();
  __ set_info("g1_post_barrier_slow_id", false);

  Register addr_oop  = Z_R1_scratch;
  Register addr_card = Z_R1_scratch;
  Register r1        = Z_R6; // Must be saved/restored.
  Register r2        = Z_R7; // Must be saved/restored.
  Register cardtable = r1;   // Must be non-volatile, because it is used to save addr_card.
  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();
  CardTable::CardValue* byte_map_base = ct->byte_map_base();

  // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
  __ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

  Label not_already_dirty, restart, refill, young_card;

  // Calculate the address of the card corresponding to the updated oop slot.
  AddressLiteral rs(byte_map_base);
  __ z_srlg(addr_card, addr_oop, CardTable::card_shift);
  addr_oop = noreg; // dead now
  __ load_const_optimized(cardtable, rs); // cardtable := <card table base>
  __ z_agr(addr_card, cardtable);         // addr_card := addr_oop>>card_shift + cardtable

  __ z_cli(0, addr_card, (int)G1CardTable::g1_young_card_val());
  __ z_bre(young_card);

  __ z_sync(); // Required to support concurrent cleaning.

  __ z_cli(0, addr_card, (int)CardTable::dirty_card_val());
  __ z_brne(not_already_dirty);

  __ bind(young_card);
  // We didn't take the branch, so we're already dirty: restore
  // used registers and return.
  __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_br(Z_R14);

  // Not dirty.
  __ bind(not_already_dirty);

  // First, dirty it: [addr_card] := 0
  __ z_mvi(0, addr_card, CardTable::dirty_card_val());

  Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card.
  Register buf = r2;
  cardtable = noreg; // now dead

  // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
  __ z_stg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

  ByteSize dirty_card_q_index_byte_offset = G1ThreadLocalData::dirty_card_queue_index_offset();
  ByteSize dirty_card_q_buf_byte_offset   = G1ThreadLocalData::dirty_card_queue_buffer_offset();

  __ bind(restart);

  // Get the index into the update buffer. G1DirtyCardQueue::_index is
  // a size_t so z_ltg is appropriate here.
  __ z_ltg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));

  // index == 0?
  __ z_brz(refill);

  __ z_lg(buf, Address(Z_thread, dirty_card_q_buf_byte_offset));
  __ add2reg(idx, -oopSize);

  __ z_stg(addr_card, 0, idx, buf); // [_buf + index] := <address_of_card>
  __ z_stg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
  // Restore killed registers and return.
  __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_lg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_br(Z_R14);

  __ bind(refill);
  save_volatile_registers(sasm);
  __ z_lgr(idx, addr_card); // Save addr_card; idx must be non-volatile.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1DirtyCardQueueSet::handle_zero_index_for_thread),
                  Z_thread);
  __ z_lgr(addr_card, idx);
  restore_volatile_registers(sasm); // Restore addr_card.
  __ z_bru(restart);
}

#undef __

#endif // COMPILER1