GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp
/*
 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeBlob.hpp"
#include "code/vmreg.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/z/c1/zBarrierSetC1.hpp"
#endif // COMPILER1
#ifdef COMPILER2
#include "gc/z/c2/zBarrierSetC2.hpp"
#endif // COMPILER2

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#undef __
#define __ masm->

static void call_vm(MacroAssembler* masm,
                    address entry_point,
                    Register arg0,
                    Register arg1) {
  // Setup arguments
  if (arg1 == c_rarg0) {
    if (arg0 == c_rarg1) {
      __ xchgptr(c_rarg1, c_rarg0);
    } else {
      __ movptr(c_rarg1, arg1);
      __ movptr(c_rarg0, arg0);
    }
  } else {
    if (arg0 != c_rarg0) {
      __ movptr(c_rarg0, arg0);
    }
    if (arg1 != c_rarg1) {
      __ movptr(c_rarg1, arg1);
    }
  }

  // Call VM
  __ MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void ZBarrierSetAssembler::load_at(MacroAssembler* masm,
                                   DecoratorSet decorators,
                                   BasicType type,
                                   Register dst,
                                   Address src,
                                   Register tmp1,
                                   Register tmp_thread) {
  if (!ZBarrierSet::barrier_needed(decorators, type)) {
    // Barrier not needed
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
    return;
  }

  BLOCK_COMMENT("ZBarrierSetAssembler::load_at {");

  // Allocate scratch register
  Register scratch = tmp1;
  if (tmp1 == noreg) {
    scratch = r12;
    __ push(scratch);
  }

  assert_different_registers(dst, scratch);

  Label done;

  //
  // Fast Path
  //

  // Load address
  __ lea(scratch, src);

  // Load oop at address
  __ movptr(dst, Address(scratch, 0));

  // Test address bad mask
  __ testptr(dst, address_bad_mask_from_thread(r15_thread));
  __ jcc(Assembler::zero, done);

  //
  // Slow path
  //

  // Save registers
  __ push(rax);
  __ push(rcx);
  __ push(rdx);
  __ push(rdi);
  __ push(rsi);
  __ push(r8);
  __ push(r9);
  __ push(r10);
  __ push(r11);

  // We may end up here from generate_native_wrapper, then the method may have
  // floats as arguments, and we must spill them before calling the VM runtime
  // leaf. From the interpreter all floats are passed on the stack.
  assert(Argument::n_float_register_parameters_j == 8, "Assumption");
  const int xmm_size = wordSize * 2;
  const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
  __ subptr(rsp, xmm_spill_size);
  __ movdqu(Address(rsp, xmm_size * 7), xmm7);
  __ movdqu(Address(rsp, xmm_size * 6), xmm6);
  __ movdqu(Address(rsp, xmm_size * 5), xmm5);
  __ movdqu(Address(rsp, xmm_size * 4), xmm4);
  __ movdqu(Address(rsp, xmm_size * 3), xmm3);
  __ movdqu(Address(rsp, xmm_size * 2), xmm2);
  __ movdqu(Address(rsp, xmm_size * 1), xmm1);
  __ movdqu(Address(rsp, xmm_size * 0), xmm0);

  // Call VM
  call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), dst, scratch);

  __ movdqu(xmm0, Address(rsp, xmm_size * 0));
  __ movdqu(xmm1, Address(rsp, xmm_size * 1));
  __ movdqu(xmm2, Address(rsp, xmm_size * 2));
  __ movdqu(xmm3, Address(rsp, xmm_size * 3));
  __ movdqu(xmm4, Address(rsp, xmm_size * 4));
  __ movdqu(xmm5, Address(rsp, xmm_size * 5));
  __ movdqu(xmm6, Address(rsp, xmm_size * 6));
  __ movdqu(xmm7, Address(rsp, xmm_size * 7));
  __ addptr(rsp, xmm_spill_size);

  __ pop(r11);
  __ pop(r10);
  __ pop(r9);
  __ pop(r8);
  __ pop(rsi);
  __ pop(rdi);
  __ pop(rdx);
  __ pop(rcx);

  if (dst == rax) {
    __ addptr(rsp, wordSize);
  } else {
    __ movptr(dst, rax);
    __ pop(rax);
  }

  __ bind(done);

  // Restore scratch register
  if (tmp1 == noreg) {
    __ pop(scratch);
  }

  BLOCK_COMMENT("} ZBarrierSetAssembler::load_at");
}

#ifdef ASSERT

void ZBarrierSetAssembler::store_at(MacroAssembler* masm,
                                    DecoratorSet decorators,
                                    BasicType type,
                                    Address dst,
                                    Register src,
                                    Register tmp1,
                                    Register tmp2) {
  BLOCK_COMMENT("ZBarrierSetAssembler::store_at {");

  // Verify oop store
  if (is_reference_type(type)) {
    // Note that src could be noreg, which means we
    // are storing null and can skip verification.
    if (src != noreg) {
      Label done;
      __ testptr(src, address_bad_mask_from_thread(r15_thread));
      __ jcc(Assembler::zero, done);
      __ stop("Verify oop store failed");
      __ should_not_reach_here();
      __ bind(done);
    }
  }

  // Store value
  BarrierSetAssembler::store_at(masm, decorators, type, dst, src, tmp1, tmp2);

  BLOCK_COMMENT("} ZBarrierSetAssembler::store_at");
}

#endif // ASSERT

void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
                                              DecoratorSet decorators,
                                              BasicType type,
                                              Register src,
                                              Register dst,
                                              Register count) {
  if (!ZBarrierSet::barrier_needed(decorators, type)) {
    // Barrier not needed
    return;
  }

  BLOCK_COMMENT("ZBarrierSetAssembler::arraycopy_prologue {");

  // Save registers
  __ pusha();

  // Call VM
  call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_array_addr(), src, count);

  // Restore registers
  __ popa();

  BLOCK_COMMENT("} ZBarrierSetAssembler::arraycopy_prologue");
}

void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
                                                         Register jni_env,
                                                         Register obj,
                                                         Register tmp,
                                                         Label& slowpath) {
  BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_jobject_in_native {");

  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Test address bad mask
  __ testptr(obj, address_bad_mask_from_jni_env(jni_env));
  __ jcc(Assembler::notZero, slowpath);

  BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native");
}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
                                                         LIR_Opr ref) const {
  __ testptr(ref->as_register(), address_bad_mask_from_thread(r15_thread));
}

void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
                                                         ZLoadBarrierStubC1* stub) const {
  // Stub entry
  __ bind(*stub->entry());

  Register ref = stub->ref()->as_register();
  Register ref_addr = noreg;
  Register tmp = noreg;

  if (stub->tmp()->is_valid()) {
    // Load address into tmp register
    ce->leal(stub->ref_addr(), stub->tmp());
    ref_addr = tmp = stub->tmp()->as_pointer_register();
  } else {
    // Address already in register
    ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
  }

  assert_different_registers(ref, ref_addr, noreg);

  // Save rax unless it is the result or tmp register
  if (ref != rax && tmp != rax) {
    __ push(rax);
  }

  // Setup arguments and call runtime stub
  __ subptr(rsp, 2 * BytesPerWord);
  ce->store_parameter(ref_addr, 1);
  ce->store_parameter(ref, 0);
  __ call(RuntimeAddress(stub->runtime_stub()));
  __ addptr(rsp, 2 * BytesPerWord);

  // Verify result
  __ verify_oop(rax);

  // Move result into place
  if (ref != rax) {
    __ movptr(ref, rax);
  }

  // Restore rax unless it is the result or tmp register
  if (ref != rax && tmp != rax) {
    __ pop(rax);
  }

  // Stub exit
  __ jmp(*stub->continuation());
}

#undef __
#define __ sasm->

void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
                                                                 DecoratorSet decorators) const {
  // Enter and save registers
  __ enter();
  __ save_live_registers_no_oop_map(true /* save_fpu_registers */);

  // Setup arguments
  __ load_parameter(1, c_rarg1);
  __ load_parameter(0, c_rarg0);

  // Call VM
  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);

  // Restore registers and return
  __ restore_live_registers_except_rax(true /* restore_fpu_registers */);
  __ leave();
  __ ret(0);
}

#endif // COMPILER1

#ifdef COMPILER2

OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
  if (!OptoReg::is_reg(opto_reg)) {
    return OptoReg::Bad;
  }

  const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
  if (vm_reg->is_XMMRegister()) {
    opto_reg &= ~15;
    switch (node->ideal_reg()) {
      case Op_VecX:
        opto_reg |= 2;
        break;
      case Op_VecY:
        opto_reg |= 4;
        break;
      case Op_VecZ:
        opto_reg |= 8;
        break;
      default:
        opto_reg |= 1;
        break;
    }
  }

  return opto_reg;
}

// We use the vec_spill_helper from the x86.ad file to avoid reinventing this wheel
extern void vec_spill_helper(CodeBuffer *cbuf, bool is_load,
                             int stack_offset, int reg, uint ireg, outputStream* st);

#undef __
#define __ _masm->

class ZSaveLiveRegisters {
private:
  struct XMMRegisterData {
    XMMRegister _reg;
    int         _size;

    // Used by GrowableArray::find()
    bool operator == (const XMMRegisterData& other) {
      return _reg == other._reg;
    }
  };

  MacroAssembler* const          _masm;
  GrowableArray<Register>        _gp_registers;
  GrowableArray<KRegister>       _opmask_registers;
  GrowableArray<XMMRegisterData> _xmm_registers;
  int                            _spill_size;
  int                            _spill_offset;

  static int xmm_compare_register_size(XMMRegisterData* left, XMMRegisterData* right) {
    if (left->_size == right->_size) {
      return 0;
    }

    return (left->_size < right->_size) ? -1 : 1;
  }

  static int xmm_slot_size(OptoReg::Name opto_reg) {
    // The low order 4 bits denote what size of the XMM register is live
    return (opto_reg & 15) << 3;
  }

  static uint xmm_ideal_reg_for_size(int reg_size) {
    switch (reg_size) {
      case 8:
        return Op_VecD;
      case 16:
        return Op_VecX;
      case 32:
        return Op_VecY;
      case 64:
        return Op_VecZ;
      default:
        fatal("Invalid register size %d", reg_size);
        return 0;
    }
  }

  bool xmm_needs_vzeroupper() const {
    return _xmm_registers.is_nonempty() && _xmm_registers.at(0)._size > 16;
  }

  void xmm_register_save(const XMMRegisterData& reg_data) {
    const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
    const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
    _spill_offset -= reg_data._size;
    vec_spill_helper(__ code(), false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
  }

  void xmm_register_restore(const XMMRegisterData& reg_data) {
    const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
    const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
    vec_spill_helper(__ code(), true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
    _spill_offset += reg_data._size;
  }

  void gp_register_save(Register reg) {
    _spill_offset -= 8;
    __ movq(Address(rsp, _spill_offset), reg);
  }

  void opmask_register_save(KRegister reg) {
    _spill_offset -= 8;
    __ kmovql(Address(rsp, _spill_offset), reg);
  }

  void gp_register_restore(Register reg) {
    __ movq(reg, Address(rsp, _spill_offset));
    _spill_offset += 8;
  }

  void opmask_register_restore(KRegister reg) {
    __ kmovql(reg, Address(rsp, _spill_offset));
    _spill_offset += 8;
  }

  void initialize(ZLoadBarrierStubC2* stub) {
    // Create mask of caller saved registers that need to
    // be saved/restored if live
    RegMask caller_saved;
    caller_saved.Insert(OptoReg::as_OptoReg(rax->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rcx->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rdx->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rsi->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rdi->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r8->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r9->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r10->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r11->as_VMReg()));
    caller_saved.Remove(OptoReg::as_OptoReg(stub->ref()->as_VMReg()));

    // Create mask of live registers
    RegMask live = stub->live();
    if (stub->tmp() != noreg) {
      live.Insert(OptoReg::as_OptoReg(stub->tmp()->as_VMReg()));
    }

    int gp_spill_size = 0;
    int opmask_spill_size = 0;
    int xmm_spill_size = 0;

    // Record registers that need to be saved/restored
    RegMaskIterator rmi(live);
    while (rmi.has_next()) {
      const OptoReg::Name opto_reg = rmi.next();
      const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);

      if (vm_reg->is_Register()) {
        if (caller_saved.Member(opto_reg)) {
          _gp_registers.append(vm_reg->as_Register());
          gp_spill_size += 8;
        }
      } else if (vm_reg->is_KRegister()) {
        // All opmask registers are caller saved, thus spill the ones
        // which are live.
        if (_opmask_registers.find(vm_reg->as_KRegister()) == -1) {
          _opmask_registers.append(vm_reg->as_KRegister());
          opmask_spill_size += 8;
        }
      } else if (vm_reg->is_XMMRegister()) {
        // We encode in the low order 4 bits of the opto_reg how large a part of the register is live
        const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~15);
        const int reg_size = xmm_slot_size(opto_reg);
        const XMMRegisterData reg_data = { vm_reg_base->as_XMMRegister(), reg_size };
        const int reg_index = _xmm_registers.find(reg_data);
        if (reg_index == -1) {
          // Not previously appended
          _xmm_registers.append(reg_data);
          xmm_spill_size += reg_size;
        } else {
          // Previously appended, update size
          const int reg_size_prev = _xmm_registers.at(reg_index)._size;
          if (reg_size > reg_size_prev) {
            _xmm_registers.at_put(reg_index, reg_data);
            xmm_spill_size += reg_size - reg_size_prev;
          }
        }
      } else {
        fatal("Unexpected register type");
      }
    }

    // Sort by size, largest first
    _xmm_registers.sort(xmm_compare_register_size);

    // On Windows, the caller reserves stack space for spilling register arguments
    const int arg_spill_size = frame::arg_reg_save_area_bytes;

    // Stack pointer must be 16 bytes aligned for the call
    _spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size + opmask_spill_size + arg_spill_size, 16);
  }

public:
  ZSaveLiveRegisters(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
      _masm(masm),
      _gp_registers(),
      _opmask_registers(),
      _xmm_registers(),
      _spill_size(0),
      _spill_offset(0) {

    //
    // Stack layout after registers have been spilled:
    //
    // | ...        | original rsp, 16 bytes aligned
    // ------------------
    // | zmm0 high  |
    // | ...        |
    // | zmm0 low   | 16 bytes aligned
    // | ...        |
    // | ymm1 high  |
    // | ...        |
    // | ymm1 low   | 16 bytes aligned
    // | ...        |
    // | xmmN high  |
    // | ...        |
    // | xmmN low   | 8 bytes aligned
    // | reg0       | 8 bytes aligned
    // | reg1       |
    // | ...        |
    // | regN       | new rsp, if 16 bytes aligned
    // | <padding>  | else new rsp, 16 bytes aligned
    // ------------------
    //

    // Figure out what registers to save/restore
    initialize(stub);

    // Allocate stack space
    if (_spill_size > 0) {
      __ subptr(rsp, _spill_size);
    }

    // Save XMM/YMM/ZMM registers
    for (int i = 0; i < _xmm_registers.length(); i++) {
      xmm_register_save(_xmm_registers.at(i));
    }

    if (xmm_needs_vzeroupper()) {
      __ vzeroupper();
    }

    // Save general purpose registers
    for (int i = 0; i < _gp_registers.length(); i++) {
      gp_register_save(_gp_registers.at(i));
    }

    // Save opmask registers
    for (int i = 0; i < _opmask_registers.length(); i++) {
      opmask_register_save(_opmask_registers.at(i));
    }
  }

  ~ZSaveLiveRegisters() {
    // Restore opmask registers
    for (int i = _opmask_registers.length() - 1; i >= 0; i--) {
      opmask_register_restore(_opmask_registers.at(i));
    }

    // Restore general purpose registers
    for (int i = _gp_registers.length() - 1; i >= 0; i--) {
      gp_register_restore(_gp_registers.at(i));
    }

    __ vzeroupper();

    // Restore XMM/YMM/ZMM registers
    for (int i = _xmm_registers.length() - 1; i >= 0; i--) {
      xmm_register_restore(_xmm_registers.at(i));
    }

    // Free stack space
    if (_spill_size > 0) {
      __ addptr(rsp, _spill_size);
    }
  }
};

class ZSetupArguments {
private:
  MacroAssembler* const _masm;
  const Register        _ref;
  const Address         _ref_addr;

public:
  ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
      _masm(masm),
      _ref(stub->ref()),
      _ref_addr(stub->ref_addr()) {

    // Setup arguments
    if (_ref_addr.base() == noreg) {
      // No self healing
      if (_ref != c_rarg0) {
        __ movq(c_rarg0, _ref);
      }
      __ xorq(c_rarg1, c_rarg1);
    } else {
      // Self healing
      if (_ref == c_rarg0) {
        __ lea(c_rarg1, _ref_addr);
      } else if (_ref != c_rarg1) {
        __ lea(c_rarg1, _ref_addr);
        __ movq(c_rarg0, _ref);
      } else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) {
        __ movq(c_rarg0, _ref);
        __ lea(c_rarg1, _ref_addr);
      } else {
        __ xchgq(c_rarg0, c_rarg1);
        if (_ref_addr.base() == c_rarg0) {
          __ lea(c_rarg1, Address(c_rarg1, _ref_addr.index(), _ref_addr.scale(), _ref_addr.disp()));
        } else if (_ref_addr.index() == c_rarg0) {
          __ lea(c_rarg1, Address(_ref_addr.base(), c_rarg1, _ref_addr.scale(), _ref_addr.disp()));
        } else {
          ShouldNotReachHere();
        }
      }
    }
  }

  ~ZSetupArguments() {
    // Transfer result
    if (_ref != rax) {
      __ movq(_ref, rax);
    }
  }
};

#undef __
#define __ masm->

void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const {
  BLOCK_COMMENT("ZLoadBarrierStubC2");

  // Stub entry
  __ bind(*stub->entry());

  {
    ZSaveLiveRegisters save_live_registers(masm, stub);
    ZSetupArguments setup_arguments(masm, stub);
    __ call(RuntimeAddress(stub->slow_path()));
  }

  // Stub exit
  __ jmp(*stub->continuation());
}

#undef __

#endif // COMPILER2