GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
1
/*
2
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#include "precompiled.hpp"
26
#include "asm/macroAssembler.hpp"
27
#include "asm/macroAssembler.inline.hpp"
28
#include "c1/c1_CodeStubs.hpp"
29
#include "c1/c1_Compilation.hpp"
30
#include "c1/c1_LIRAssembler.hpp"
31
#include "c1/c1_MacroAssembler.hpp"
32
#include "c1/c1_Runtime1.hpp"
33
#include "c1/c1_ValueStack.hpp"
34
#include "ci/ciArrayKlass.hpp"
35
#include "ci/ciInstance.hpp"
36
#include "compiler/oopMap.hpp"
37
#include "gc/shared/collectedHeap.hpp"
38
#include "gc/shared/gc_globals.hpp"
39
#include "nativeInst_x86.hpp"
40
#include "oops/objArrayKlass.hpp"
41
#include "runtime/frame.inline.hpp"
42
#include "runtime/safepointMechanism.hpp"
43
#include "runtime/sharedRuntime.hpp"
44
#include "runtime/stubRoutines.hpp"
45
#include "utilities/powerOfTwo.hpp"
46
#include "vmreg_x86.inline.hpp"
47
48
49
// These masks are used to provide 128-bit aligned bitmasks to the XMM
50
// instructions, to allow sign-masking or sign-bit flipping. They allow
51
// fast versions of NegF/NegD and AbsF/AbsD.
52
53
// Note: 'double' and 'long long' have 32-bit alignment on x86.
54
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
55
// Use the expression (adr)&(~0xF) to provide the 128-bit aligned address
56
// of 128-bit operands for SSE instructions.
57
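// Example (illustrative, not part of the original file): if adr ends in ...0x38, then
// adr & ~0xF ends in ...0x30, the next lower 16-byte boundary; the extra 128 bits
// reserved in fp_signmask_pool below guarantee that this rounded-down address still
// lies inside the pool.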
jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
58
// Store the value to a 128-bit operand.
59
operand[0] = lo;
60
operand[1] = hi;
61
return operand;
62
}
63
64
// Buffer for 128-bit masks used by SSE instructions.
65
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
66
67
// Static initialization during VM startup.
68
static jlong *float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
69
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
70
static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
71
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
72
73
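// Illustrative note (not part of the original file): AbsF/AbsD are implemented by
// AND-ing the value with a signmask pool entry to clear the sign bit, and NegF/NegD
// by XOR-ing with a signflip pool entry to toggle it. A sketch of the usual idiom,
// assuming the SSE forms that take a 128-bit memory operand:
//   __ andps(dst, ExternalAddress((address)float_signmask_pool));   // fabs on a float
//   __ xorpd(dst, ExternalAddress((address)double_signflip_pool));  // negate a double
// The 16-byte alignment produced by double_quadword() is what makes these masks legal
// memory operands for such instructions.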
74
NEEDS_CLEANUP // remove these definitions?
75
const Register IC_Klass = rax; // where the IC klass is cached
76
const Register SYNC_header = rax; // synchronization header
77
const Register SHIFT_count = rcx; // where the count for shift operations must be
78
79
#define __ _masm->
80
81
82
static void select_different_registers(Register preserve,
83
Register extra,
84
Register &tmp1,
85
Register &tmp2) {
86
if (tmp1 == preserve) {
87
assert_different_registers(tmp1, tmp2, extra);
88
tmp1 = extra;
89
} else if (tmp2 == preserve) {
90
assert_different_registers(tmp1, tmp2, extra);
91
tmp2 = extra;
92
}
93
assert_different_registers(preserve, tmp1, tmp2);
94
}
95
96
97
98
static void select_different_registers(Register preserve,
99
Register extra,
100
Register &tmp1,
101
Register &tmp2,
102
Register &tmp3) {
103
if (tmp1 == preserve) {
104
assert_different_registers(tmp1, tmp2, tmp3, extra);
105
tmp1 = extra;
106
} else if (tmp2 == preserve) {
107
assert_different_registers(tmp1, tmp2, tmp3, extra);
108
tmp2 = extra;
109
} else if (tmp3 == preserve) {
110
assert_different_registers(tmp1, tmp2, tmp3, extra);
111
tmp3 = extra;
112
}
113
assert_different_registers(preserve, tmp1, tmp2, tmp3);
114
}
115
116
117
118
bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
119
if (opr->is_constant()) {
120
LIR_Const* constant = opr->as_constant_ptr();
121
switch (constant->type()) {
122
case T_INT: {
123
return true;
124
}
125
126
default:
127
return false;
128
}
129
}
130
return false;
131
}
132
133
134
LIR_Opr LIR_Assembler::receiverOpr() {
135
return FrameMap::receiver_opr;
136
}
137
138
LIR_Opr LIR_Assembler::osrBufferPointer() {
139
return FrameMap::as_pointer_opr(receiverOpr()->as_register());
140
}
141
142
//--------------fpu register translations-----------------------
143
144
145
address LIR_Assembler::float_constant(float f) {
146
address const_addr = __ float_constant(f);
147
if (const_addr == NULL) {
148
bailout("const section overflow");
149
return __ code()->consts()->start();
150
} else {
151
return const_addr;
152
}
153
}
154
155
156
address LIR_Assembler::double_constant(double d) {
157
address const_addr = __ double_constant(d);
158
if (const_addr == NULL) {
159
bailout("const section overflow");
160
return __ code()->consts()->start();
161
} else {
162
return const_addr;
163
}
164
}
165
166
#ifndef _LP64
167
void LIR_Assembler::fpop() {
168
__ fpop();
169
}
170
171
void LIR_Assembler::fxch(int i) {
172
__ fxch(i);
173
}
174
175
void LIR_Assembler::fld(int i) {
176
__ fld_s(i);
177
}
178
179
void LIR_Assembler::ffree(int i) {
180
__ ffree(i);
181
}
182
#endif // !_LP64
183
184
void LIR_Assembler::breakpoint() {
185
__ int3();
186
}
187
188
void LIR_Assembler::push(LIR_Opr opr) {
189
if (opr->is_single_cpu()) {
190
__ push_reg(opr->as_register());
191
} else if (opr->is_double_cpu()) {
192
NOT_LP64(__ push_reg(opr->as_register_hi()));
193
__ push_reg(opr->as_register_lo());
194
} else if (opr->is_stack()) {
195
__ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
196
} else if (opr->is_constant()) {
197
LIR_Const* const_opr = opr->as_constant_ptr();
198
if (const_opr->type() == T_OBJECT) {
199
__ push_oop(const_opr->as_jobject());
200
} else if (const_opr->type() == T_INT) {
201
__ push_jint(const_opr->as_jint());
202
} else {
203
ShouldNotReachHere();
204
}
205
206
} else {
207
ShouldNotReachHere();
208
}
209
}
210
211
void LIR_Assembler::pop(LIR_Opr opr) {
212
if (opr->is_single_cpu()) {
213
__ pop_reg(opr->as_register());
214
} else {
215
ShouldNotReachHere();
216
}
217
}
218
219
bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
220
return addr->base()->is_illegal() && addr->index()->is_illegal();
221
}
222
223
//-------------------------------------------
224
225
Address LIR_Assembler::as_Address(LIR_Address* addr) {
226
return as_Address(addr, rscratch1);
227
}
228
229
Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
230
if (addr->base()->is_illegal()) {
231
assert(addr->index()->is_illegal(), "must be illegal too");
232
AddressLiteral laddr((address)addr->disp(), relocInfo::none);
233
if (! __ reachable(laddr)) {
234
__ movptr(tmp, laddr.addr());
235
Address res(tmp, 0);
236
return res;
237
} else {
238
return __ as_Address(laddr);
239
}
240
}
241
242
Register base = addr->base()->as_pointer_register();
243
244
if (addr->index()->is_illegal()) {
245
return Address( base, addr->disp());
246
} else if (addr->index()->is_cpu_register()) {
247
Register index = addr->index()->as_pointer_register();
248
return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
249
} else if (addr->index()->is_constant()) {
250
intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
251
assert(Assembler::is_simm32(addr_offset), "must be");
252
253
return Address(base, addr_offset);
254
} else {
255
Unimplemented();
256
return Address();
257
}
258
}
259
260
261
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
262
Address base = as_Address(addr);
263
return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
264
}
265
266
267
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
268
return as_Address(addr);
269
}
270
271
272
void LIR_Assembler::osr_entry() {
273
offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
274
BlockBegin* osr_entry = compilation()->hir()->osr_entry();
275
ValueStack* entry_state = osr_entry->state();
276
int number_of_locks = entry_state->locks_size();
277
278
// we jump here if osr happens with the interpreter
279
// state set up to continue at the beginning of the
280
// loop that triggered osr - in particular, we have
281
// the following registers set up:
282
//
283
// rcx: osr buffer
284
//
285
286
// build frame
287
ciMethod* m = compilation()->method();
288
__ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
289
290
// OSR buffer is
291
//
292
// locals[nlocals-1..0]
293
// monitors[0..number_of_locks]
294
//
295
// locals is a direct copy of the interpreter frame, so in the osr buffer
296
// the first slot in the locals array is the last local from the interpreter
297
// and the last slot is local[0] (the receiver) from the interpreter
298
//
299
// Similarly with locks: the first lock slot in the osr buffer is the nth lock
300
// from the interpreter frame, and the nth lock slot in the osr buffer is the 0th lock
301
// in the interpreter frame (the method lock, if this is a synchronized method)
302
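// Worked example (illustrative, 64-bit, assuming BasicObjectLock::size() == 2 words):
// with max_locals() == 4 and number_of_locks == 2, the locals occupy bytes [0, 32)
// of the OSR buffer and monitor_offset below is 8*4 + (2*8)*(2-1) = 48. Monitor 0 is
// then read from [OSR_buf + 48] (lock) and [OSR_buf + 56] (object), and monitor 1
// from [OSR_buf + 32] and [OSR_buf + 40].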
303
// Initialize monitors in the compiled activation.
304
// rcx: pointer to osr buffer
305
//
306
// All other registers are dead at this point and the locals will be
307
// copied into place by code emitted in the IR.
308
309
Register OSR_buf = osrBufferPointer()->as_pointer_register();
310
{ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
311
int monitor_offset = BytesPerWord * method()->max_locals() +
312
(BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
313
// SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
314
// the OSR buffer using 2 word entries: first the lock and then
315
// the oop.
316
for (int i = 0; i < number_of_locks; i++) {
317
int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
318
#ifdef ASSERT
319
// verify the interpreter's monitor has a non-null object
320
{
321
Label L;
322
__ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
323
__ jcc(Assembler::notZero, L);
324
__ stop("locked object is NULL");
325
__ bind(L);
326
}
327
#endif
328
__ movptr(rbx, Address(OSR_buf, slot_offset + 0));
329
__ movptr(frame_map()->address_for_monitor_lock(i), rbx);
330
__ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
331
__ movptr(frame_map()->address_for_monitor_object(i), rbx);
332
}
333
}
334
}
335
336
337
// inline cache check; done before the frame is built.
338
int LIR_Assembler::check_icache() {
339
Register receiver = FrameMap::receiver_opr->as_register();
340
Register ic_klass = IC_Klass;
341
const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
342
const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
343
if (!do_post_padding) {
344
// insert some nops so that the verified entry point is aligned on CodeEntryAlignment
345
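// (i.e. pad so that the code emitted after the ic_cmp_size-byte inline cache check,
// which is the verified entry point, starts on a CodeEntryAlignment boundary)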
__ align(CodeEntryAlignment, __ offset() + ic_cmp_size);
346
}
347
int offset = __ offset();
348
__ inline_cache_check(receiver, IC_Klass);
349
assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
350
if (do_post_padding) {
351
// force alignment after the cache check.
352
// It's been verified to be aligned if !VerifyOops
353
__ align(CodeEntryAlignment);
354
}
355
return offset;
356
}
357
358
void LIR_Assembler::clinit_barrier(ciMethod* method) {
359
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
360
assert(!method->holder()->is_not_initialized(), "initialization should have been started");
361
362
Label L_skip_barrier;
363
Register klass = rscratch1;
364
Register thread = LP64_ONLY( r15_thread ) NOT_LP64( noreg );
365
assert(thread != noreg, "x86_32 not implemented");
366
367
__ mov_metadata(klass, method->holder()->constant_encoding());
368
__ clinit_barrier(klass, thread, &L_skip_barrier /*L_fast_path*/);
369
370
__ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
371
372
__ bind(L_skip_barrier);
373
}
374
375
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
376
jobject o = NULL;
377
PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
378
__ movoop(reg, o);
379
patching_epilog(patch, lir_patch_normal, reg, info);
380
}
381
382
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
383
Metadata* o = NULL;
384
PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
385
__ mov_metadata(reg, o);
386
patching_epilog(patch, lir_patch_normal, reg, info);
387
}
388
389
// This specifies the rsp decrement needed to build the frame
390
int LIR_Assembler::initial_frame_size_in_bytes() const {
391
// if rounding, must let FrameMap know!
392
393
// The frame_map records size in slots (32-bit words)
394
395
// subtract two words to account for return address and link
396
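// Worked example (illustrative, 64-bit): with framesize() == 32 slots,
// slots_per_word == 2 and stack_slot_size == 4 this returns (32 - 4) * 4 = 112 bytes,
// i.e. the frame body without the return address and the saved rbp.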
return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
397
}
398
399
400
int LIR_Assembler::emit_exception_handler() {
401
// if the last instruction is a call (typically to do a throw which
402
// is coming at the end after block reordering) the return address
403
// must still point into the code area in order to avoid assertion
404
// failures when searching for the corresponding bci => add a nop
405
// (was bug 5/14/1999 - gri)
406
__ nop();
407
408
// generate code for exception handler
409
address handler_base = __ start_a_stub(exception_handler_size());
410
if (handler_base == NULL) {
411
// not enough space left for the handler
412
bailout("exception handler overflow");
413
return -1;
414
}
415
416
int offset = code_offset();
417
418
// the exception oop and pc are in rax, and rdx
419
// no other registers need to be preserved, so invalidate them
420
__ invalidate_registers(false, true, true, false, true, true);
421
422
// check that there is really an exception
423
__ verify_not_null_oop(rax);
424
425
// search an exception handler (rax: exception oop, rdx: throwing pc)
426
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
427
__ should_not_reach_here();
428
guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
429
__ end_a_stub();
430
431
return offset;
432
}
433
434
435
// Emit the code to remove the frame from the stack in the exception
436
// unwind path.
437
int LIR_Assembler::emit_unwind_handler() {
438
#ifndef PRODUCT
439
if (CommentedAssembly) {
440
_masm->block_comment("Unwind handler");
441
}
442
#endif
443
444
int offset = code_offset();
445
446
// Fetch the exception from TLS and clear out exception related thread state
447
Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
448
NOT_LP64(__ get_thread(rsi));
449
__ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));
450
__ movptr(Address(thread, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);
451
__ movptr(Address(thread, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);
452
453
__ bind(_unwind_handler_entry);
454
__ verify_not_null_oop(rax);
455
if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
456
__ mov(rbx, rax); // Preserve the exception (rbx is always callee-saved)
457
}
458
459
// Perform needed unlocking
460
MonitorExitStub* stub = NULL;
461
if (method()->is_synchronized()) {
462
monitor_address(0, FrameMap::rax_opr);
463
stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
464
__ unlock_object(rdi, rsi, rax, *stub->entry());
465
__ bind(*stub->continuation());
466
}
467
468
if (compilation()->env()->dtrace_method_probes()) {
469
#ifdef _LP64
470
__ mov(rdi, r15_thread);
471
__ mov_metadata(rsi, method()->constant_encoding());
472
#else
473
__ get_thread(rax);
474
__ movptr(Address(rsp, 0), rax);
475
__ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
476
#endif
477
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
478
}
479
480
if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
481
__ mov(rax, rbx); // Restore the exception
482
}
483
484
// remove the activation and dispatch to the unwind handler
485
__ remove_frame(initial_frame_size_in_bytes());
486
__ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
487
488
// Emit the slow path assembly
489
if (stub != NULL) {
490
stub->emit_code(this);
491
}
492
493
return offset;
494
}
495
496
497
int LIR_Assembler::emit_deopt_handler() {
498
// if the last instruction is a call (typically to do a throw which
499
// is coming at the end after block reordering) the return address
500
// must still point into the code area in order to avoid assertion
501
// failures when searching for the corresponding bci => add a nop
502
// (was bug 5/14/1999 - gri)
503
__ nop();
504
505
// generate code for deopt handler
506
address handler_base = __ start_a_stub(deopt_handler_size());
507
if (handler_base == NULL) {
508
// not enough space left for the handler
509
bailout("deopt handler overflow");
510
return -1;
511
}
512
513
int offset = code_offset();
514
InternalAddress here(__ pc());
515
516
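// Descriptive note (not in the original): pushing 'here' fakes a return address that
// points into this nmethod before jumping to the deopt blob, so the unpack code sees
// the same stack shape as if it had been called from the deopt handler.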
__ pushptr(here.addr());
517
__ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
518
guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
519
__ end_a_stub();
520
521
return offset;
522
}
523
524
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
525
assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
526
if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
527
assert(result->fpu() == 0, "result must already be on TOS");
528
}
529
530
// Pop the stack before the safepoint code
531
__ remove_frame(initial_frame_size_in_bytes());
532
533
if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
534
__ reserved_stack_check();
535
}
536
537
// Note: we do not need to round double result; float result has the right precision
538
// the poll sets the condition code, but no data registers
539
540
#ifdef _LP64
541
const Register thread = r15_thread;
542
#else
543
const Register thread = rbx;
544
__ get_thread(thread);
545
#endif
546
code_stub->set_safepoint_offset(__ offset());
547
__ relocate(relocInfo::poll_return_type);
548
__ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */);
549
__ ret(0);
550
}
551
552
553
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
554
guarantee(info != NULL, "Shouldn't be NULL");
555
int offset = __ offset();
556
#ifdef _LP64
557
const Register poll_addr = rscratch1;
558
__ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
559
#else
560
assert(tmp->is_cpu_register(), "needed");
561
const Register poll_addr = tmp->as_register();
562
__ get_thread(poll_addr);
563
__ movptr(poll_addr, Address(poll_addr, in_bytes(JavaThread::polling_page_offset())));
564
#endif
565
add_debug_info_for_branch(info);
566
__ relocate(relocInfo::poll_type);
567
address pre_pc = __ pc();
568
__ testl(rax, Address(poll_addr, 0));
569
address post_pc = __ pc();
570
guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
571
return offset;
572
}
573
574
575
void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
576
if (from_reg != to_reg) __ mov(to_reg, from_reg);
577
}
578
579
void LIR_Assembler::swap_reg(Register a, Register b) {
580
__ xchgptr(a, b);
581
}
582
583
584
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
585
assert(src->is_constant(), "should not call otherwise");
586
assert(dest->is_register(), "should not call otherwise");
587
LIR_Const* c = src->as_constant_ptr();
588
589
switch (c->type()) {
590
case T_INT: {
591
assert(patch_code == lir_patch_none, "no patching handled here");
592
__ movl(dest->as_register(), c->as_jint());
593
break;
594
}
595
596
case T_ADDRESS: {
597
assert(patch_code == lir_patch_none, "no patching handled here");
598
__ movptr(dest->as_register(), c->as_jint());
599
break;
600
}
601
602
case T_LONG: {
603
assert(patch_code == lir_patch_none, "no patching handled here");
604
#ifdef _LP64
605
__ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
606
#else
607
__ movptr(dest->as_register_lo(), c->as_jint_lo());
608
__ movptr(dest->as_register_hi(), c->as_jint_hi());
609
#endif // _LP64
610
break;
611
}
612
613
case T_OBJECT: {
614
if (patch_code != lir_patch_none) {
615
jobject2reg_with_patching(dest->as_register(), info);
616
} else {
617
__ movoop(dest->as_register(), c->as_jobject());
618
}
619
break;
620
}
621
622
case T_METADATA: {
623
if (patch_code != lir_patch_none) {
624
klass2reg_with_patching(dest->as_register(), info);
625
} else {
626
__ mov_metadata(dest->as_register(), c->as_metadata());
627
}
628
break;
629
}
630
631
case T_FLOAT: {
632
if (dest->is_single_xmm()) {
633
if (LP64_ONLY(UseAVX <= 2 &&) c->is_zero_float()) {
634
__ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
635
} else {
636
__ movflt(dest->as_xmm_float_reg(),
637
InternalAddress(float_constant(c->as_jfloat())));
638
}
639
} else {
640
#ifndef _LP64
641
assert(dest->is_single_fpu(), "must be");
642
assert(dest->fpu_regnr() == 0, "dest must be TOS");
643
if (c->is_zero_float()) {
644
__ fldz();
645
} else if (c->is_one_float()) {
646
__ fld1();
647
} else {
648
__ fld_s (InternalAddress(float_constant(c->as_jfloat())));
649
}
650
#else
651
ShouldNotReachHere();
652
#endif // !_LP64
653
}
654
break;
655
}
656
657
case T_DOUBLE: {
658
if (dest->is_double_xmm()) {
659
if (LP64_ONLY(UseAVX <= 2 &&) c->is_zero_double()) {
660
__ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
661
} else {
662
__ movdbl(dest->as_xmm_double_reg(),
663
InternalAddress(double_constant(c->as_jdouble())));
664
}
665
} else {
666
#ifndef _LP64
667
assert(dest->is_double_fpu(), "must be");
668
assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
669
if (c->is_zero_double()) {
670
__ fldz();
671
} else if (c->is_one_double()) {
672
__ fld1();
673
} else {
674
__ fld_d (InternalAddress(double_constant(c->as_jdouble())));
675
}
676
#else
677
ShouldNotReachHere();
678
#endif // !_LP64
679
}
680
break;
681
}
682
683
default:
684
ShouldNotReachHere();
685
}
686
}
687
688
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
689
assert(src->is_constant(), "should not call otherwise");
690
assert(dest->is_stack(), "should not call otherwise");
691
LIR_Const* c = src->as_constant_ptr();
692
693
switch (c->type()) {
694
case T_INT: // fall through
695
case T_FLOAT:
696
__ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
697
break;
698
699
case T_ADDRESS:
700
__ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
701
break;
702
703
case T_OBJECT:
704
__ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
705
break;
706
707
case T_LONG: // fall through
708
case T_DOUBLE:
709
#ifdef _LP64
710
__ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
711
lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
712
#else
713
__ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
714
lo_word_offset_in_bytes), c->as_jint_lo_bits());
715
__ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
716
hi_word_offset_in_bytes), c->as_jint_hi_bits());
717
#endif // _LP64
718
break;
719
720
default:
721
ShouldNotReachHere();
722
}
723
}
724
725
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
726
assert(src->is_constant(), "should not call otherwise");
727
assert(dest->is_address(), "should not call otherwise");
728
LIR_Const* c = src->as_constant_ptr();
729
LIR_Address* addr = dest->as_address_ptr();
730
731
int null_check_here = code_offset();
732
switch (type) {
733
case T_INT: // fall through
734
case T_FLOAT:
735
__ movl(as_Address(addr), c->as_jint_bits());
736
break;
737
738
case T_ADDRESS:
739
__ movptr(as_Address(addr), c->as_jint_bits());
740
break;
741
742
case T_OBJECT: // fall through
743
case T_ARRAY:
744
if (c->as_jobject() == NULL) {
745
if (UseCompressedOops && !wide) {
746
__ movl(as_Address(addr), (int32_t)NULL_WORD);
747
} else {
748
#ifdef _LP64
749
__ xorptr(rscratch1, rscratch1);
750
null_check_here = code_offset();
751
__ movptr(as_Address(addr), rscratch1);
752
#else
753
__ movptr(as_Address(addr), NULL_WORD);
754
#endif
755
}
756
} else {
757
if (is_literal_address(addr)) {
758
ShouldNotReachHere();
759
__ movoop(as_Address(addr, noreg), c->as_jobject());
760
} else {
761
#ifdef _LP64
762
__ movoop(rscratch1, c->as_jobject());
763
if (UseCompressedOops && !wide) {
764
__ encode_heap_oop(rscratch1);
765
null_check_here = code_offset();
766
__ movl(as_Address_lo(addr), rscratch1);
767
} else {
768
null_check_here = code_offset();
769
__ movptr(as_Address_lo(addr), rscratch1);
770
}
771
#else
772
__ movoop(as_Address(addr), c->as_jobject());
773
#endif
774
}
775
}
776
break;
777
778
case T_LONG: // fall through
779
case T_DOUBLE:
780
#ifdef _LP64
781
if (is_literal_address(addr)) {
782
ShouldNotReachHere();
783
__ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
784
} else {
785
__ movptr(r10, (intptr_t)c->as_jlong_bits());
786
null_check_here = code_offset();
787
__ movptr(as_Address_lo(addr), r10);
788
}
789
#else
790
// Always reachable in 32-bit, so this doesn't produce a useless literal move
791
__ movptr(as_Address_hi(addr), c->as_jint_hi_bits());
792
__ movptr(as_Address_lo(addr), c->as_jint_lo_bits());
793
#endif // _LP64
794
break;
795
796
case T_BOOLEAN: // fall through
797
case T_BYTE:
798
__ movb(as_Address(addr), c->as_jint() & 0xFF);
799
break;
800
801
case T_CHAR: // fall through
802
case T_SHORT:
803
__ movw(as_Address(addr), c->as_jint() & 0xFFFF);
804
break;
805
806
default:
807
ShouldNotReachHere();
808
};
809
810
if (info != NULL) {
811
add_debug_info_for_null_check(null_check_here, info);
812
}
813
}
814
815
816
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
817
assert(src->is_register(), "should not call otherwise");
818
assert(dest->is_register(), "should not call otherwise");
819
820
// move between cpu-registers
821
if (dest->is_single_cpu()) {
822
#ifdef _LP64
823
if (src->type() == T_LONG) {
824
// Can do LONG -> OBJECT
825
move_regs(src->as_register_lo(), dest->as_register());
826
return;
827
}
828
#endif
829
assert(src->is_single_cpu(), "must match");
830
if (src->type() == T_OBJECT) {
831
__ verify_oop(src->as_register());
832
}
833
move_regs(src->as_register(), dest->as_register());
834
835
} else if (dest->is_double_cpu()) {
836
#ifdef _LP64
837
if (is_reference_type(src->type())) {
838
// Surprising, but we can see a move of a long to T_OBJECT
839
__ verify_oop(src->as_register());
840
move_regs(src->as_register(), dest->as_register_lo());
841
return;
842
}
843
#endif
844
assert(src->is_double_cpu(), "must match");
845
Register f_lo = src->as_register_lo();
846
Register f_hi = src->as_register_hi();
847
Register t_lo = dest->as_register_lo();
848
Register t_hi = dest->as_register_hi();
849
#ifdef _LP64
850
assert(f_hi == f_lo, "must be same");
851
assert(t_hi == t_lo, "must be same");
852
move_regs(f_lo, t_lo);
853
#else
854
assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");
855
856
857
if (f_lo == t_hi && f_hi == t_lo) {
858
swap_reg(f_lo, f_hi);
859
} else if (f_hi == t_lo) {
860
assert(f_lo != t_hi, "overwriting register");
861
move_regs(f_hi, t_hi);
862
move_regs(f_lo, t_lo);
863
} else {
864
assert(f_hi != t_lo, "overwriting register");
865
move_regs(f_lo, t_lo);
866
move_regs(f_hi, t_hi);
867
}
868
#endif // LP64
869
870
#ifndef _LP64
871
// special moves from fpu-register to xmm-register
872
// necessary for method results
873
} else if (src->is_single_xmm() && !dest->is_single_xmm()) {
874
__ movflt(Address(rsp, 0), src->as_xmm_float_reg());
875
__ fld_s(Address(rsp, 0));
876
} else if (src->is_double_xmm() && !dest->is_double_xmm()) {
877
__ movdbl(Address(rsp, 0), src->as_xmm_double_reg());
878
__ fld_d(Address(rsp, 0));
879
} else if (dest->is_single_xmm() && !src->is_single_xmm()) {
880
__ fstp_s(Address(rsp, 0));
881
__ movflt(dest->as_xmm_float_reg(), Address(rsp, 0));
882
} else if (dest->is_double_xmm() && !src->is_double_xmm()) {
883
__ fstp_d(Address(rsp, 0));
884
__ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0));
885
#endif // !_LP64
886
887
// move between xmm-registers
888
} else if (dest->is_single_xmm()) {
889
assert(src->is_single_xmm(), "must match");
890
__ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
891
} else if (dest->is_double_xmm()) {
892
assert(src->is_double_xmm(), "must match");
893
__ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());
894
895
#ifndef _LP64
896
// move between fpu-registers (no instruction necessary because of fpu-stack)
897
} else if (dest->is_single_fpu() || dest->is_double_fpu()) {
898
assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
899
assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
900
#endif // !_LP64
901
902
} else {
903
ShouldNotReachHere();
904
}
905
}
906
907
void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
908
assert(src->is_register(), "should not call otherwise");
909
assert(dest->is_stack(), "should not call otherwise");
910
911
if (src->is_single_cpu()) {
912
Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
913
if (is_reference_type(type)) {
914
__ verify_oop(src->as_register());
915
__ movptr (dst, src->as_register());
916
} else if (type == T_METADATA || type == T_ADDRESS) {
917
__ movptr (dst, src->as_register());
918
} else {
919
__ movl (dst, src->as_register());
920
}
921
922
} else if (src->is_double_cpu()) {
923
Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
924
Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
925
__ movptr (dstLO, src->as_register_lo());
926
NOT_LP64(__ movptr (dstHI, src->as_register_hi()));
927
928
} else if (src->is_single_xmm()) {
929
Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
930
__ movflt(dst_addr, src->as_xmm_float_reg());
931
932
} else if (src->is_double_xmm()) {
933
Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
934
__ movdbl(dst_addr, src->as_xmm_double_reg());
935
936
#ifndef _LP64
937
} else if (src->is_single_fpu()) {
938
assert(src->fpu_regnr() == 0, "argument must be on TOS");
939
Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
940
if (pop_fpu_stack) __ fstp_s (dst_addr);
941
else __ fst_s (dst_addr);
942
943
} else if (src->is_double_fpu()) {
944
assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
945
Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
946
if (pop_fpu_stack) __ fstp_d (dst_addr);
947
else __ fst_d (dst_addr);
948
#endif // !_LP64
949
950
} else {
951
ShouldNotReachHere();
952
}
953
}
954
955
956
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
957
LIR_Address* to_addr = dest->as_address_ptr();
958
PatchingStub* patch = NULL;
959
Register compressed_src = rscratch1;
960
961
if (is_reference_type(type)) {
962
__ verify_oop(src->as_register());
963
#ifdef _LP64
964
if (UseCompressedOops && !wide) {
965
__ movptr(compressed_src, src->as_register());
966
__ encode_heap_oop(compressed_src);
967
if (patch_code != lir_patch_none) {
968
info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
969
}
970
}
971
#endif
972
}
973
974
if (patch_code != lir_patch_none) {
975
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
976
Address toa = as_Address(to_addr);
977
assert(toa.disp() != 0, "must have");
978
}
979
980
int null_check_here = code_offset();
981
switch (type) {
982
case T_FLOAT: {
983
#ifdef _LP64
984
assert(src->is_single_xmm(), "not a float");
985
__ movflt(as_Address(to_addr), src->as_xmm_float_reg());
986
#else
987
if (src->is_single_xmm()) {
988
__ movflt(as_Address(to_addr), src->as_xmm_float_reg());
989
} else {
990
assert(src->is_single_fpu(), "must be");
991
assert(src->fpu_regnr() == 0, "argument must be on TOS");
992
if (pop_fpu_stack) __ fstp_s(as_Address(to_addr));
993
else __ fst_s (as_Address(to_addr));
994
}
995
#endif // _LP64
996
break;
997
}
998
999
case T_DOUBLE: {
1000
#ifdef _LP64
1001
assert(src->is_double_xmm(), "not a double");
1002
__ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1003
#else
1004
if (src->is_double_xmm()) {
1005
__ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1006
} else {
1007
assert(src->is_double_fpu(), "must be");
1008
assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
1009
if (pop_fpu_stack) __ fstp_d(as_Address(to_addr));
1010
else __ fst_d (as_Address(to_addr));
1011
}
1012
#endif // _LP64
1013
break;
1014
}
1015
1016
case T_ARRAY: // fall through
1017
case T_OBJECT: // fall through
1018
if (UseCompressedOops && !wide) {
1019
__ movl(as_Address(to_addr), compressed_src);
1020
} else {
1021
__ movptr(as_Address(to_addr), src->as_register());
1022
}
1023
break;
1024
case T_METADATA:
1025
// We get here to store a method pointer to the stack to pass to
1026
// a dtrace runtime call. This can't work on 64 bit with
1027
// compressed klass ptrs: T_METADATA can be a compressed klass
1028
// ptr or a 64 bit method pointer.
1029
LP64_ONLY(ShouldNotReachHere());
1030
__ movptr(as_Address(to_addr), src->as_register());
1031
break;
1032
case T_ADDRESS:
1033
__ movptr(as_Address(to_addr), src->as_register());
1034
break;
1035
case T_INT:
1036
__ movl(as_Address(to_addr), src->as_register());
1037
break;
1038
1039
case T_LONG: {
1040
Register from_lo = src->as_register_lo();
1041
Register from_hi = src->as_register_hi();
1042
#ifdef _LP64
1043
__ movptr(as_Address_lo(to_addr), from_lo);
1044
#else
1045
Register base = to_addr->base()->as_register();
1046
Register index = noreg;
1047
if (to_addr->index()->is_register()) {
1048
index = to_addr->index()->as_register();
1049
}
1050
if (base == from_lo || index == from_lo) {
1051
assert(base != from_hi, "can't be");
1052
assert(index == noreg || (index != base && index != from_hi), "can't handle this");
1053
__ movl(as_Address_hi(to_addr), from_hi);
1054
if (patch != NULL) {
1055
patching_epilog(patch, lir_patch_high, base, info);
1056
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1057
patch_code = lir_patch_low;
1058
}
1059
__ movl(as_Address_lo(to_addr), from_lo);
1060
} else {
1061
assert(index == noreg || (index != base && index != from_lo), "can't handle this");
1062
__ movl(as_Address_lo(to_addr), from_lo);
1063
if (patch != NULL) {
1064
patching_epilog(patch, lir_patch_low, base, info);
1065
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1066
patch_code = lir_patch_high;
1067
}
1068
__ movl(as_Address_hi(to_addr), from_hi);
1069
}
1070
#endif // _LP64
1071
break;
1072
}
1073
1074
case T_BYTE: // fall through
1075
case T_BOOLEAN: {
1076
Register src_reg = src->as_register();
1077
Address dst_addr = as_Address(to_addr);
1078
assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
1079
__ movb(dst_addr, src_reg);
1080
break;
1081
}
1082
1083
case T_CHAR: // fall through
1084
case T_SHORT:
1085
__ movw(as_Address(to_addr), src->as_register());
1086
break;
1087
1088
default:
1089
ShouldNotReachHere();
1090
}
1091
if (info != NULL) {
1092
add_debug_info_for_null_check(null_check_here, info);
1093
}
1094
1095
if (patch_code != lir_patch_none) {
1096
patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
1097
}
1098
}
1099
1100
1101
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1102
assert(src->is_stack(), "should not call otherwise");
1103
assert(dest->is_register(), "should not call otherwise");
1104
1105
if (dest->is_single_cpu()) {
1106
if (is_reference_type(type)) {
1107
__ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1108
__ verify_oop(dest->as_register());
1109
} else if (type == T_METADATA || type == T_ADDRESS) {
1110
__ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1111
} else {
1112
__ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1113
}
1114
1115
} else if (dest->is_double_cpu()) {
1116
Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
1117
Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
1118
__ movptr(dest->as_register_lo(), src_addr_LO);
1119
NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));
1120
1121
} else if (dest->is_single_xmm()) {
1122
Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1123
__ movflt(dest->as_xmm_float_reg(), src_addr);
1124
1125
} else if (dest->is_double_xmm()) {
1126
Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1127
__ movdbl(dest->as_xmm_double_reg(), src_addr);
1128
1129
#ifndef _LP64
1130
} else if (dest->is_single_fpu()) {
1131
assert(dest->fpu_regnr() == 0, "dest must be TOS");
1132
Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1133
__ fld_s(src_addr);
1134
1135
} else if (dest->is_double_fpu()) {
1136
assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1137
Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1138
__ fld_d(src_addr);
1139
#endif // _LP64
1140
1141
} else {
1142
ShouldNotReachHere();
1143
}
1144
}
1145
1146
1147
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
1148
if (src->is_single_stack()) {
1149
if (is_reference_type(type)) {
1150
__ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
1151
__ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
1152
} else {
1153
#ifndef _LP64
1154
__ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
1155
__ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
1156
#else
1157
// no pushl on 64-bit
1158
__ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
1159
__ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
1160
#endif
1161
}
1162
1163
} else if (src->is_double_stack()) {
1164
#ifdef _LP64
1165
__ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
1166
__ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
1167
#else
1168
__ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
1169
// push and pop the part at src + wordSize, adding wordSize for the previous push
1170
__ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
1171
__ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
1172
__ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
1173
#endif // _LP64
1174
1175
} else {
1176
ShouldNotReachHere();
1177
}
1178
}
1179
1180
1181
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
1182
assert(src->is_address(), "should not call otherwise");
1183
assert(dest->is_register(), "should not call otherwise");
1184
1185
LIR_Address* addr = src->as_address_ptr();
1186
Address from_addr = as_Address(addr);
1187
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
1188
1189
if (addr->base()->type() == T_OBJECT) {
1190
__ verify_oop(addr->base()->as_pointer_register());
1191
}
1192
1193
switch (type) {
1194
case T_BOOLEAN: // fall through
1195
case T_BYTE: // fall through
1196
case T_CHAR: // fall through
1197
case T_SHORT:
1198
if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
1199
// on pre P6 processors we may get partial register stalls
1200
// so blow away the value of to_rinfo before loading a
1201
// partial word into it. Do it here so that it precedes
1202
// the potential patch point below.
1203
__ xorptr(dest->as_register(), dest->as_register());
1204
}
1205
break;
1206
default:
1207
break;
1208
}
1209
1210
PatchingStub* patch = NULL;
1211
if (patch_code != lir_patch_none) {
1212
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1213
assert(from_addr.disp() != 0, "must have");
1214
}
1215
if (info != NULL) {
1216
add_debug_info_for_null_check_here(info);
1217
}
1218
1219
switch (type) {
1220
case T_FLOAT: {
1221
if (dest->is_single_xmm()) {
1222
__ movflt(dest->as_xmm_float_reg(), from_addr);
1223
} else {
1224
#ifndef _LP64
1225
assert(dest->is_single_fpu(), "must be");
1226
assert(dest->fpu_regnr() == 0, "dest must be TOS");
1227
__ fld_s(from_addr);
1228
#else
1229
ShouldNotReachHere();
1230
#endif // !LP64
1231
}
1232
break;
1233
}
1234
1235
case T_DOUBLE: {
1236
if (dest->is_double_xmm()) {
1237
__ movdbl(dest->as_xmm_double_reg(), from_addr);
1238
} else {
1239
#ifndef _LP64
1240
assert(dest->is_double_fpu(), "must be");
1241
assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1242
__ fld_d(from_addr);
1243
#else
1244
ShouldNotReachHere();
1245
#endif // !LP64
1246
}
1247
break;
1248
}
1249
1250
case T_OBJECT: // fall through
1251
case T_ARRAY: // fall through
1252
if (UseCompressedOops && !wide) {
1253
__ movl(dest->as_register(), from_addr);
1254
} else {
1255
__ movptr(dest->as_register(), from_addr);
1256
}
1257
break;
1258
1259
case T_ADDRESS:
1260
if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1261
__ movl(dest->as_register(), from_addr);
1262
} else {
1263
__ movptr(dest->as_register(), from_addr);
1264
}
1265
break;
1266
case T_INT:
1267
__ movl(dest->as_register(), from_addr);
1268
break;
1269
1270
case T_LONG: {
1271
Register to_lo = dest->as_register_lo();
1272
Register to_hi = dest->as_register_hi();
1273
#ifdef _LP64
1274
__ movptr(to_lo, as_Address_lo(addr));
1275
#else
1276
Register base = addr->base()->as_register();
1277
Register index = noreg;
1278
if (addr->index()->is_register()) {
1279
index = addr->index()->as_register();
1280
}
1281
if ((base == to_lo && index == to_hi) ||
1282
(base == to_hi && index == to_lo)) {
1283
// addresses with 2 registers are only formed as a result of
1284
// array access so this code will never have to deal with
1285
// patches or null checks.
1286
assert(info == NULL && patch == NULL, "must be");
1287
__ lea(to_hi, as_Address(addr));
1288
__ movl(to_lo, Address(to_hi, 0));
1289
__ movl(to_hi, Address(to_hi, BytesPerWord));
1290
} else if (base == to_lo || index == to_lo) {
1291
assert(base != to_hi, "can't be");
1292
assert(index == noreg || (index != base && index != to_hi), "can't handle this");
1293
__ movl(to_hi, as_Address_hi(addr));
1294
if (patch != NULL) {
1295
patching_epilog(patch, lir_patch_high, base, info);
1296
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1297
patch_code = lir_patch_low;
1298
}
1299
__ movl(to_lo, as_Address_lo(addr));
1300
} else {
1301
assert(index == noreg || (index != base && index != to_lo), "can't handle this");
1302
__ movl(to_lo, as_Address_lo(addr));
1303
if (patch != NULL) {
1304
patching_epilog(patch, lir_patch_low, base, info);
1305
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1306
patch_code = lir_patch_high;
1307
}
1308
__ movl(to_hi, as_Address_hi(addr));
1309
}
1310
#endif // _LP64
1311
break;
1312
}
1313
1314
case T_BOOLEAN: // fall through
1315
case T_BYTE: {
1316
Register dest_reg = dest->as_register();
1317
assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
1318
if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1319
__ movsbl(dest_reg, from_addr);
1320
} else {
1321
__ movb(dest_reg, from_addr);
1322
__ shll(dest_reg, 24);
1323
__ sarl(dest_reg, 24);
1324
}
1325
break;
1326
}
1327
1328
case T_CHAR: {
1329
Register dest_reg = dest->as_register();
1330
assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
1331
if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1332
__ movzwl(dest_reg, from_addr);
1333
} else {
1334
__ movw(dest_reg, from_addr);
1335
}
1336
break;
1337
}
1338
1339
case T_SHORT: {
1340
Register dest_reg = dest->as_register();
1341
if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1342
__ movswl(dest_reg, from_addr);
1343
} else {
1344
__ movw(dest_reg, from_addr);
1345
__ shll(dest_reg, 16);
1346
__ sarl(dest_reg, 16);
1347
}
1348
break;
1349
}
1350
1351
default:
1352
ShouldNotReachHere();
1353
}
1354
1355
if (patch != NULL) {
1356
patching_epilog(patch, patch_code, addr->base()->as_register(), info);
1357
}
1358
1359
if (is_reference_type(type)) {
1360
#ifdef _LP64
1361
if (UseCompressedOops && !wide) {
1362
__ decode_heap_oop(dest->as_register());
1363
}
1364
#endif
1365
1366
// Load barrier has not yet been applied, so ZGC can't verify the oop here
1367
if (!UseZGC) {
1368
__ verify_oop(dest->as_register());
1369
}
1370
} else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1371
#ifdef _LP64
1372
if (UseCompressedClassPointers) {
1373
__ decode_klass_not_null(dest->as_register(), tmp_load_klass);
1374
}
1375
#endif
1376
}
1377
}
1378
1379
1380
NEEDS_CLEANUP; // This could be static?
1381
Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
1382
int elem_size = type2aelembytes(type);
1383
switch (elem_size) {
1384
case 1: return Address::times_1;
1385
case 2: return Address::times_2;
1386
case 4: return Address::times_4;
1387
case 8: return Address::times_8;
1388
}
1389
ShouldNotReachHere();
1390
return Address::no_scale;
1391
}
1392
1393
1394
void LIR_Assembler::emit_op3(LIR_Op3* op) {
1395
switch (op->code()) {
1396
case lir_idiv:
1397
case lir_irem:
1398
arithmetic_idiv(op->code(),
1399
op->in_opr1(),
1400
op->in_opr2(),
1401
op->in_opr3(),
1402
op->result_opr(),
1403
op->info());
1404
break;
1405
case lir_fmad:
1406
__ fmad(op->result_opr()->as_xmm_double_reg(),
1407
op->in_opr1()->as_xmm_double_reg(),
1408
op->in_opr2()->as_xmm_double_reg(),
1409
op->in_opr3()->as_xmm_double_reg());
1410
break;
1411
case lir_fmaf:
1412
__ fmaf(op->result_opr()->as_xmm_float_reg(),
1413
op->in_opr1()->as_xmm_float_reg(),
1414
op->in_opr2()->as_xmm_float_reg(),
1415
op->in_opr3()->as_xmm_float_reg());
1416
break;
1417
default: ShouldNotReachHere(); break;
1418
}
1419
}
1420
1421
void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
1422
#ifdef ASSERT
1423
assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
1424
if (op->block() != NULL) _branch_target_blocks.append(op->block());
1425
if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
1426
#endif
1427
1428
if (op->cond() == lir_cond_always) {
1429
if (op->info() != NULL) add_debug_info_for_branch(op->info());
1430
__ jmp (*(op->label()));
1431
} else {
1432
Assembler::Condition acond = Assembler::zero;
1433
if (op->code() == lir_cond_float_branch) {
1434
assert(op->ublock() != NULL, "must have unordered successor");
1435
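// ucomiss/ucomisd report an unordered compare (a NaN operand) by setting PF, so the
// parity branch below routes the unordered case to its own successor; the ordered
// cases can then use the unsigned below/above condition codes chosen here.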
__ jcc(Assembler::parity, *(op->ublock()->label()));
1436
switch(op->cond()) {
1437
case lir_cond_equal: acond = Assembler::equal; break;
1438
case lir_cond_notEqual: acond = Assembler::notEqual; break;
1439
case lir_cond_less: acond = Assembler::below; break;
1440
case lir_cond_lessEqual: acond = Assembler::belowEqual; break;
1441
case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;
1442
case lir_cond_greater: acond = Assembler::above; break;
1443
default: ShouldNotReachHere();
1444
}
1445
} else {
1446
switch (op->cond()) {
1447
case lir_cond_equal: acond = Assembler::equal; break;
1448
case lir_cond_notEqual: acond = Assembler::notEqual; break;
1449
case lir_cond_less: acond = Assembler::less; break;
1450
case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
1451
case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
1452
case lir_cond_greater: acond = Assembler::greater; break;
1453
case lir_cond_belowEqual: acond = Assembler::belowEqual; break;
1454
case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break;
1455
default: ShouldNotReachHere();
1456
}
1457
}
1458
__ jcc(acond,*(op->label()));
1459
}
1460
}
1461
1462
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
1463
LIR_Opr src = op->in_opr();
1464
LIR_Opr dest = op->result_opr();
1465
1466
switch (op->bytecode()) {
1467
case Bytecodes::_i2l:
1468
#ifdef _LP64
1469
__ movl2ptr(dest->as_register_lo(), src->as_register());
1470
#else
1471
move_regs(src->as_register(), dest->as_register_lo());
1472
move_regs(src->as_register(), dest->as_register_hi());
1473
__ sarl(dest->as_register_hi(), 31);
1474
#endif // LP64
1475
break;
1476
1477
case Bytecodes::_l2i:
1478
#ifdef _LP64
1479
__ movl(dest->as_register(), src->as_register_lo());
1480
#else
1481
move_regs(src->as_register_lo(), dest->as_register());
1482
#endif
1483
break;
1484
1485
case Bytecodes::_i2b:
1486
move_regs(src->as_register(), dest->as_register());
1487
__ sign_extend_byte(dest->as_register());
1488
break;
1489
1490
case Bytecodes::_i2c:
1491
move_regs(src->as_register(), dest->as_register());
1492
__ andl(dest->as_register(), 0xFFFF);
1493
break;
1494
1495
case Bytecodes::_i2s:
1496
move_regs(src->as_register(), dest->as_register());
1497
__ sign_extend_short(dest->as_register());
1498
break;
1499
1500
1501
#ifdef _LP64
1502
case Bytecodes::_f2d:
1503
__ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
1504
break;
1505
1506
case Bytecodes::_d2f:
1507
__ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
1508
break;
1509
1510
case Bytecodes::_i2f:
1511
__ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
1512
break;
1513
1514
case Bytecodes::_i2d:
1515
__ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
1516
break;
1517
1518
case Bytecodes::_l2f:
1519
__ cvtsi2ssq(dest->as_xmm_float_reg(), src->as_register_lo());
1520
break;
1521
1522
case Bytecodes::_l2d:
1523
__ cvtsi2sdq(dest->as_xmm_double_reg(), src->as_register_lo());
1524
break;
1525
1526
case Bytecodes::_f2i:
1527
__ convert_f2i(dest->as_register(), src->as_xmm_float_reg());
1528
break;
1529
1530
case Bytecodes::_d2i:
1531
__ convert_d2i(dest->as_register(), src->as_xmm_double_reg());
1532
break;
1533
1534
case Bytecodes::_f2l:
1535
__ convert_f2l(dest->as_register_lo(), src->as_xmm_float_reg());
1536
break;
1537
1538
case Bytecodes::_d2l:
1539
__ convert_d2l(dest->as_register_lo(), src->as_xmm_double_reg());
1540
break;
1541
#else
1542
case Bytecodes::_f2d:
1543
case Bytecodes::_d2f:
1544
if (dest->is_single_xmm()) {
1545
__ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
1546
} else if (dest->is_double_xmm()) {
1547
__ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
1548
} else {
1549
assert(src->fpu() == dest->fpu(), "register must be equal");
1550
// do nothing (float result is rounded later through spilling)
1551
}
1552
break;
1553
1554
case Bytecodes::_i2f:
1555
case Bytecodes::_i2d:
1556
if (dest->is_single_xmm()) {
1557
__ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
1558
} else if (dest->is_double_xmm()) {
1559
__ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
1560
} else {
1561
assert(dest->fpu() == 0, "result must be on TOS");
1562
__ movl(Address(rsp, 0), src->as_register());
1563
__ fild_s(Address(rsp, 0));
1564
}
1565
break;
1566
1567
case Bytecodes::_l2f:
1568
case Bytecodes::_l2d:
1569
assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)");
1570
assert(dest->fpu() == 0, "result must be on TOS");
1571
__ movptr(Address(rsp, 0), src->as_register_lo());
1572
__ movl(Address(rsp, BytesPerWord), src->as_register_hi());
1573
__ fild_d(Address(rsp, 0));
1574
// float result is rounded later through spilling
1575
break;
1576
1577
case Bytecodes::_f2i:
1578
case Bytecodes::_d2i:
1579
if (src->is_single_xmm()) {
1580
__ cvttss2sil(dest->as_register(), src->as_xmm_float_reg());
1581
} else if (src->is_double_xmm()) {
1582
__ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg());
1583
} else {
1584
assert(src->fpu() == 0, "input must be on TOS");
1585
__ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_trunc()));
1586
__ fist_s(Address(rsp, 0));
1587
__ movl(dest->as_register(), Address(rsp, 0));
1588
__ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
1589
}
1590
// IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
1591
assert(op->stub() != NULL, "stub required");
1592
__ cmpl(dest->as_register(), 0x80000000);
1593
__ jcc(Assembler::equal, *op->stub()->entry());
1594
__ bind(*op->stub()->continuation());
1595
break;
1596
1597
case Bytecodes::_f2l:
1598
case Bytecodes::_d2l:
1599
assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)");
1600
assert(src->fpu() == 0, "input must be on TOS");
1601
assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers");
1602
1603
// instruction sequence too long to inline it here
1604
{
1605
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id)));
1606
}
1607
break;
1608
#endif // _LP64
1609
1610
default: ShouldNotReachHere();
1611
}
1612
}
1613
1614
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
1615
if (op->init_check()) {
1616
add_debug_info_for_null_check_here(op->stub()->info());
1617
__ cmpb(Address(op->klass()->as_register(),
1618
InstanceKlass::init_state_offset()),
1619
InstanceKlass::fully_initialized);
1620
__ jcc(Assembler::notEqual, *op->stub()->entry());
1621
}
1622
__ allocate_object(op->obj()->as_register(),
1623
op->tmp1()->as_register(),
1624
op->tmp2()->as_register(),
1625
op->header_size(),
1626
op->object_size(),
1627
op->klass()->as_register(),
1628
*op->stub()->entry());
1629
__ bind(*op->stub()->continuation());
1630
}
1631
1632
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1633
Register len = op->len()->as_register();
1634
LP64_ONLY( __ movslq(len, len); )
1635
1636
if (UseSlowPath ||
1637
(!UseFastNewObjectArray && is_reference_type(op->type())) ||
1638
(!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1639
__ jmp(*op->stub()->entry());
1640
} else {
1641
Register tmp1 = op->tmp1()->as_register();
1642
Register tmp2 = op->tmp2()->as_register();
1643
Register tmp3 = op->tmp3()->as_register();
1644
if (len == tmp1) {
1645
tmp1 = tmp3;
1646
} else if (len == tmp2) {
1647
tmp2 = tmp3;
1648
} else if (len == tmp3) {
1649
// everything is ok
1650
} else {
1651
__ mov(tmp3, len);
1652
}
1653
__ allocate_array(op->obj()->as_register(),
1654
len,
1655
tmp1,
1656
tmp2,
1657
arrayOopDesc::header_size(op->type()),
1658
array_element_size(op->type()),
1659
op->klass()->as_register(),
1660
*op->stub()->entry());
1661
}
1662
__ bind(*op->stub()->continuation());
1663
}
1664
1665
void LIR_Assembler::type_profile_helper(Register mdo,
1666
ciMethodData *md, ciProfileData *data,
1667
Register recv, Label* update_done) {
1668
for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1669
Label next_test;
1670
// See if the receiver is receiver[n].
1671
__ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
1672
__ jccb(Assembler::notEqual, next_test);
1673
Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
1674
__ addptr(data_addr, DataLayout::counter_increment);
1675
__ jmp(*update_done);
1676
__ bind(next_test);
1677
}
1678
1679
// Didn't find receiver; find next empty slot and fill it in
1680
for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1681
Label next_test;
1682
Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
1683
__ cmpptr(recv_addr, (intptr_t)NULL_WORD);
1684
__ jccb(Assembler::notEqual, next_test);
1685
__ movptr(recv_addr, recv);
1686
__ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
1687
__ jmp(*update_done);
1688
__ bind(next_test);
1689
}
1690
}
1691
1692
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
1693
// we always need a stub for the failure case.
1694
CodeStub* stub = op->stub();
1695
Register obj = op->object()->as_register();
1696
Register k_RInfo = op->tmp1()->as_register();
1697
Register klass_RInfo = op->tmp2()->as_register();
1698
Register dst = op->result_opr()->as_register();
1699
ciKlass* k = op->klass();
1700
Register Rtmp1 = noreg;
1701
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
1702
1703
// check if it needs to be profiled
1704
ciMethodData* md = NULL;
1705
ciProfileData* data = NULL;
1706
1707
if (op->should_profile()) {
1708
ciMethod* method = op->profiled_method();
1709
assert(method != NULL, "Should have method");
1710
int bci = op->profiled_bci();
1711
md = method->method_data_or_null();
1712
assert(md != NULL, "Sanity");
1713
data = md->bci_to_data(bci);
1714
assert(data != NULL, "need data for type check");
1715
assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1716
}
1717
Label profile_cast_success, profile_cast_failure;
1718
Label *success_target = op->should_profile() ? &profile_cast_success : success;
1719
Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
1720
1721
if (obj == k_RInfo) {
1722
k_RInfo = dst;
1723
} else if (obj == klass_RInfo) {
1724
klass_RInfo = dst;
1725
}
1726
if (k->is_loaded() && !UseCompressedClassPointers) {
1727
select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1728
} else {
1729
Rtmp1 = op->tmp3()->as_register();
1730
select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1731
}
1732
1733
assert_different_registers(obj, k_RInfo, klass_RInfo);
1734
1735
__ cmpptr(obj, (int32_t)NULL_WORD);
1736
if (op->should_profile()) {
1737
Label not_null;
1738
__ jccb(Assembler::notEqual, not_null);
1739
// Object is null; update MDO and exit
1740
Register mdo = klass_RInfo;
1741
__ mov_metadata(mdo, md->constant_encoding());
1742
Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1743
int header_bits = BitData::null_seen_byte_constant();
1744
__ orb(data_addr, header_bits);
1745
__ jmp(*obj_is_null);
1746
__ bind(not_null);
1747
} else {
1748
__ jcc(Assembler::equal, *obj_is_null);
1749
}
1750
1751
if (!k->is_loaded()) {
1752
klass2reg_with_patching(k_RInfo, op->info_for_patch());
1753
} else {
1754
#ifdef _LP64
1755
__ mov_metadata(k_RInfo, k->constant_encoding());
1756
#endif // _LP64
1757
}
1758
__ verify_oop(obj);
1759
1760
if (op->fast_check()) {
1761
// get object class
1762
// not a safepoint as obj null check happens earlier
1763
#ifdef _LP64
1764
if (UseCompressedClassPointers) {
1765
__ load_klass(Rtmp1, obj, tmp_load_klass);
1766
__ cmpptr(k_RInfo, Rtmp1);
1767
} else {
1768
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1769
}
1770
#else
1771
if (k->is_loaded()) {
1772
__ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
1773
} else {
1774
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1775
}
1776
#endif
1777
__ jcc(Assembler::notEqual, *failure_target);
1778
// successful cast, fall through to profile or jump
1779
} else {
1780
// get object class
1781
// not a safepoint as obj null check happens earlier
1782
__ load_klass(klass_RInfo, obj, tmp_load_klass);
1783
if (k->is_loaded()) {
1784
// See if we get an immediate positive hit
1785
#ifdef _LP64
1786
__ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
1787
#else
1788
__ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
1789
#endif // _LP64
1790
if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1791
__ jcc(Assembler::notEqual, *failure_target);
1792
// successful cast, fall through to profile or jump
1793
} else {
1794
// See if we get an immediate positive hit
1795
__ jcc(Assembler::equal, *success_target);
1796
// check for self
1797
#ifdef _LP64
1798
__ cmpptr(klass_RInfo, k_RInfo);
1799
#else
1800
__ cmpklass(klass_RInfo, k->constant_encoding());
1801
#endif // _LP64
1802
__ jcc(Assembler::equal, *success_target);
1803
1804
__ push(klass_RInfo);
1805
#ifdef _LP64
1806
__ push(k_RInfo);
1807
#else
1808
__ pushklass(k->constant_encoding());
1809
#endif // _LP64
1810
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1811
__ pop(klass_RInfo);
1812
__ pop(klass_RInfo);
1813
// result is a boolean
1814
__ cmpl(klass_RInfo, 0);
1815
__ jcc(Assembler::equal, *failure_target);
1816
// successful cast, fall through to profile or jump
1817
}
1818
} else {
1819
// perform the fast part of the checking logic
1820
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1821
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
1822
__ push(klass_RInfo);
1823
__ push(k_RInfo);
1824
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1825
__ pop(klass_RInfo);
1826
__ pop(k_RInfo);
1827
// result is a boolean
1828
__ cmpl(k_RInfo, 0);
1829
__ jcc(Assembler::equal, *failure_target);
1830
// successful cast, fall through to profile or jump
1831
}
1832
}
1833
if (op->should_profile()) {
1834
Register mdo = klass_RInfo, recv = k_RInfo;
1835
__ bind(profile_cast_success);
1836
__ mov_metadata(mdo, md->constant_encoding());
1837
__ load_klass(recv, obj, tmp_load_klass);
1838
type_profile_helper(mdo, md, data, recv, success);
1839
__ jmp(*success);
1840
1841
__ bind(profile_cast_failure);
1842
__ mov_metadata(mdo, md->constant_encoding());
1843
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1844
__ subptr(counter_addr, DataLayout::counter_increment);
1845
__ jmp(*failure);
1846
}
1847
__ jmp(*success);
1848
}
1849
1850
1851
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1852
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
1853
LIR_Code code = op->code();
1854
if (code == lir_store_check) {
1855
Register value = op->object()->as_register();
1856
Register array = op->array()->as_register();
1857
Register k_RInfo = op->tmp1()->as_register();
1858
Register klass_RInfo = op->tmp2()->as_register();
1859
Register Rtmp1 = op->tmp3()->as_register();
1860
1861
CodeStub* stub = op->stub();
1862
1863
// check if it needs to be profiled
1864
ciMethodData* md = NULL;
1865
ciProfileData* data = NULL;
1866
1867
if (op->should_profile()) {
1868
ciMethod* method = op->profiled_method();
1869
assert(method != NULL, "Should have method");
1870
int bci = op->profiled_bci();
1871
md = method->method_data_or_null();
1872
assert(md != NULL, "Sanity");
1873
data = md->bci_to_data(bci);
1874
assert(data != NULL, "need data for type check");
1875
assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1876
}
1877
Label profile_cast_success, profile_cast_failure, done;
1878
Label *success_target = op->should_profile() ? &profile_cast_success : &done;
1879
Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
1880
1881
__ cmpptr(value, (int32_t)NULL_WORD);
1882
if (op->should_profile()) {
1883
Label not_null;
1884
__ jccb(Assembler::notEqual, not_null);
1885
// Object is null; update MDO and exit
1886
Register mdo = klass_RInfo;
1887
__ mov_metadata(mdo, md->constant_encoding());
1888
Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1889
int header_bits = BitData::null_seen_byte_constant();
1890
__ orb(data_addr, header_bits);
1891
__ jmp(done);
1892
__ bind(not_null);
1893
} else {
1894
__ jcc(Assembler::equal, done);
1895
}
1896
1897
add_debug_info_for_null_check_here(op->info_for_exception());
1898
__ load_klass(k_RInfo, array, tmp_load_klass);
1899
__ load_klass(klass_RInfo, value, tmp_load_klass);
1900
1901
// get instance klass (it's already uncompressed)
1902
__ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
1903
// perform the fast part of the checking logic
1904
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1905
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
1906
__ push(klass_RInfo);
1907
__ push(k_RInfo);
1908
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1909
__ pop(klass_RInfo);
1910
__ pop(k_RInfo);
1911
// result is a boolean
1912
__ cmpl(k_RInfo, 0);
1913
__ jcc(Assembler::equal, *failure_target);
1914
// fall through to the success case
1915
1916
if (op->should_profile()) {
1917
Register mdo = klass_RInfo, recv = k_RInfo;
1918
__ bind(profile_cast_success);
1919
__ mov_metadata(mdo, md->constant_encoding());
1920
__ load_klass(recv, value, tmp_load_klass);
1921
type_profile_helper(mdo, md, data, recv, &done);
1922
__ jmpb(done);
1923
1924
__ bind(profile_cast_failure);
1925
__ mov_metadata(mdo, md->constant_encoding());
1926
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1927
__ subptr(counter_addr, DataLayout::counter_increment);
1928
__ jmp(*stub->entry());
1929
}
1930
1931
__ bind(done);
1932
} else
1933
if (code == lir_checkcast) {
1934
Register obj = op->object()->as_register();
1935
Register dst = op->result_opr()->as_register();
1936
Label success;
1937
emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1938
__ bind(success);
1939
if (dst != obj) {
1940
__ mov(dst, obj);
1941
}
1942
} else
1943
if (code == lir_instanceof) {
1944
Register obj = op->object()->as_register();
1945
Register dst = op->result_opr()->as_register();
1946
Label success, failure, done;
1947
emit_typecheck_helper(op, &success, &failure, &failure);
1948
__ bind(failure);
1949
__ xorptr(dst, dst);
1950
__ jmpb(done);
1951
__ bind(success);
1952
__ movptr(dst, 1);
1953
__ bind(done);
1954
} else {
1955
ShouldNotReachHere();
1956
}
1957
1958
}
1959
1960
1961
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1962
if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
1963
assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
1964
assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
1965
assert(op->new_value()->as_register_lo() == rbx, "wrong register");
1966
assert(op->new_value()->as_register_hi() == rcx, "wrong register");
1967
Register addr = op->addr()->as_register();
1968
__ lock();
1969
NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1970
1971
} else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
1972
NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1973
Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1974
Register newval = op->new_value()->as_register();
1975
Register cmpval = op->cmp_value()->as_register();
1976
assert(cmpval == rax, "wrong register");
1977
assert(newval != NULL, "new val must be register");
1978
assert(cmpval != newval, "cmp and new values must be in different registers");
1979
assert(cmpval != addr, "cmp and addr must be in different registers");
1980
assert(newval != addr, "new value and addr must be in different registers");
1981
1982
if ( op->code() == lir_cas_obj) {
1983
#ifdef _LP64
1984
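// With compressed oops the field holds a 32-bit narrow oop, so both the expected and
// the new value are encoded first and a 32-bit cmpxchgl is used on the field.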
if (UseCompressedOops) {
1985
__ encode_heap_oop(cmpval);
1986
__ mov(rscratch1, newval);
1987
__ encode_heap_oop(rscratch1);
1988
__ lock();
1989
// cmpval (rax) is implicitly used by this instruction
1990
__ cmpxchgl(rscratch1, Address(addr, 0));
1991
} else
1992
#endif
1993
{
1994
__ lock();
1995
__ cmpxchgptr(newval, Address(addr, 0));
1996
}
1997
} else {
1998
assert(op->code() == lir_cas_int, "lir_cas_int expected");
1999
__ lock();
2000
__ cmpxchgl(newval, Address(addr, 0));
2001
}
2002
#ifdef _LP64
2003
} else if (op->code() == lir_cas_long) {
2004
Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2005
Register newval = op->new_value()->as_register_lo();
2006
Register cmpval = op->cmp_value()->as_register_lo();
2007
assert(cmpval == rax, "wrong register");
2008
assert(newval != NULL, "new val must be register");
2009
assert(cmpval != newval, "cmp and new values must be in different registers");
2010
assert(cmpval != addr, "cmp and addr must be in different registers");
2011
assert(newval != addr, "new value and addr must be in different registers");
2012
__ lock();
2013
__ cmpxchgq(newval, Address(addr, 0));
2014
#endif // _LP64
2015
} else {
2016
Unimplemented();
2017
}
2018
}
2019
2020
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
2021
Assembler::Condition acond, ncond;
2022
switch (condition) {
2023
case lir_cond_equal: acond = Assembler::equal; ncond = Assembler::notEqual; break;
2024
case lir_cond_notEqual: acond = Assembler::notEqual; ncond = Assembler::equal; break;
2025
case lir_cond_less: acond = Assembler::less; ncond = Assembler::greaterEqual; break;
2026
case lir_cond_lessEqual: acond = Assembler::lessEqual; ncond = Assembler::greater; break;
2027
case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less; break;
2028
case lir_cond_greater: acond = Assembler::greater; ncond = Assembler::lessEqual; break;
2029
case lir_cond_belowEqual: acond = Assembler::belowEqual; ncond = Assembler::above; break;
2030
case lir_cond_aboveEqual: acond = Assembler::aboveEqual; ncond = Assembler::below; break;
2031
default: acond = Assembler::equal; ncond = Assembler::notEqual;
2032
ShouldNotReachHere();
2033
}
2034
2035
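// Strategy: move opr1 into the result unconditionally, then overwrite the result with
// opr2 when the condition does NOT hold (ncond) - either with a conditional move or,
// when cmov is unavailable or opr2 is a constant, with a short branch around the move.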
if (opr1->is_cpu_register()) {
2036
reg2reg(opr1, result);
2037
} else if (opr1->is_stack()) {
2038
stack2reg(opr1, result, result->type());
2039
} else if (opr1->is_constant()) {
2040
const2reg(opr1, result, lir_patch_none, NULL);
2041
} else {
2042
ShouldNotReachHere();
2043
}
2044
2045
if (VM_Version::supports_cmov() && !opr2->is_constant()) {
2046
// optimized version that does not require a branch
2047
if (opr2->is_single_cpu()) {
2048
assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
2049
__ cmov(ncond, result->as_register(), opr2->as_register());
2050
} else if (opr2->is_double_cpu()) {
2051
assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
2052
assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
2053
__ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());
2054
NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());)
2055
} else if (opr2->is_single_stack()) {
2056
__ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
2057
} else if (opr2->is_double_stack()) {
2058
__ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
2059
NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));)
2060
} else {
2061
ShouldNotReachHere();
2062
}
2063
2064
} else {
2065
Label skip;
2066
__ jcc (acond, skip);
2067
if (opr2->is_cpu_register()) {
2068
reg2reg(opr2, result);
2069
} else if (opr2->is_stack()) {
2070
stack2reg(opr2, result, result->type());
2071
} else if (opr2->is_constant()) {
2072
const2reg(opr2, result, lir_patch_none, NULL);
2073
} else {
2074
ShouldNotReachHere();
2075
}
2076
__ bind(skip);
2077
}
2078
}
2079
2080
2081
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
2082
assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
2083
2084
if (left->is_single_cpu()) {
2085
assert(left == dest, "left and dest must be equal");
2086
Register lreg = left->as_register();
2087
2088
if (right->is_single_cpu()) {
2089
// cpu register - cpu register
2090
Register rreg = right->as_register();
2091
switch (code) {
2092
case lir_add: __ addl (lreg, rreg); break;
2093
case lir_sub: __ subl (lreg, rreg); break;
2094
case lir_mul: __ imull(lreg, rreg); break;
2095
default: ShouldNotReachHere();
2096
}
2097
2098
} else if (right->is_stack()) {
2099
// cpu register - stack
2100
Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
2101
switch (code) {
2102
case lir_add: __ addl(lreg, raddr); break;
2103
case lir_sub: __ subl(lreg, raddr); break;
2104
default: ShouldNotReachHere();
2105
}
2106
2107
} else if (right->is_constant()) {
2108
// cpu register - constant
2109
jint c = right->as_constant_ptr()->as_jint();
2110
switch (code) {
2111
case lir_add: {
2112
__ incrementl(lreg, c);
2113
break;
2114
}
2115
case lir_sub: {
2116
__ decrementl(lreg, c);
2117
break;
2118
}
2119
default: ShouldNotReachHere();
2120
}
2121
2122
} else {
2123
ShouldNotReachHere();
2124
}
2125
2126
} else if (left->is_double_cpu()) {
2127
assert(left == dest, "left and dest must be equal");
2128
Register lreg_lo = left->as_register_lo();
2129
Register lreg_hi = left->as_register_hi();
2130
2131
if (right->is_double_cpu()) {
2132
// cpu register - cpu register
2133
Register rreg_lo = right->as_register_lo();
2134
Register rreg_hi = right->as_register_hi();
2135
NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi));
2136
LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo));
2137
switch (code) {
2138
case lir_add:
2139
__ addptr(lreg_lo, rreg_lo);
2140
NOT_LP64(__ adcl(lreg_hi, rreg_hi));
2141
break;
2142
case lir_sub:
2143
__ subptr(lreg_lo, rreg_lo);
2144
NOT_LP64(__ sbbl(lreg_hi, rreg_hi));
2145
break;
2146
case lir_mul:
2147
#ifdef _LP64
2148
__ imulq(lreg_lo, rreg_lo);
2149
#else
2150
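// 64-bit multiply from 32-bit halves (32-bit VM only):
//   lo64 = lo(l_lo * r_lo)
//   hi64 = hi(l_lo * r_lo) + l_hi * r_lo + l_lo * r_hi
// mull leaves l_lo * r_lo in edx:eax; the two cross products are summed into edx.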
assert(lreg_lo == rax && lreg_hi == rdx, "must be");
2151
__ imull(lreg_hi, rreg_lo);
2152
__ imull(rreg_hi, lreg_lo);
2153
__ addl (rreg_hi, lreg_hi);
2154
__ mull (rreg_lo);
2155
__ addl (lreg_hi, rreg_hi);
2156
#endif // _LP64
2157
break;
2158
default:
2159
ShouldNotReachHere();
2160
}
2161
2162
} else if (right->is_constant()) {
2163
// cpu register - constant
2164
#ifdef _LP64
2165
jlong c = right->as_constant_ptr()->as_jlong_bits();
2166
__ movptr(r10, (intptr_t) c);
2167
switch (code) {
2168
case lir_add:
2169
__ addptr(lreg_lo, r10);
2170
break;
2171
case lir_sub:
2172
__ subptr(lreg_lo, r10);
2173
break;
2174
default:
2175
ShouldNotReachHere();
2176
}
2177
#else
2178
jint c_lo = right->as_constant_ptr()->as_jint_lo();
2179
jint c_hi = right->as_constant_ptr()->as_jint_hi();
2180
switch (code) {
2181
case lir_add:
2182
__ addptr(lreg_lo, c_lo);
2183
__ adcl(lreg_hi, c_hi);
2184
break;
2185
case lir_sub:
2186
__ subptr(lreg_lo, c_lo);
2187
__ sbbl(lreg_hi, c_hi);
2188
break;
2189
default:
2190
ShouldNotReachHere();
2191
}
2192
#endif // _LP64
2193
2194
} else {
2195
ShouldNotReachHere();
2196
}
2197
2198
} else if (left->is_single_xmm()) {
2199
assert(left == dest, "left and dest must be equal");
2200
XMMRegister lreg = left->as_xmm_float_reg();
2201
2202
if (right->is_single_xmm()) {
2203
XMMRegister rreg = right->as_xmm_float_reg();
2204
switch (code) {
2205
case lir_add: __ addss(lreg, rreg); break;
2206
case lir_sub: __ subss(lreg, rreg); break;
2207
case lir_mul: __ mulss(lreg, rreg); break;
2208
case lir_div: __ divss(lreg, rreg); break;
2209
default: ShouldNotReachHere();
2210
}
2211
} else {
2212
Address raddr;
2213
if (right->is_single_stack()) {
2214
raddr = frame_map()->address_for_slot(right->single_stack_ix());
2215
} else if (right->is_constant()) {
2216
// hack for now
2217
raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat())));
2218
} else {
2219
ShouldNotReachHere();
2220
}
2221
switch (code) {
2222
case lir_add: __ addss(lreg, raddr); break;
2223
case lir_sub: __ subss(lreg, raddr); break;
2224
case lir_mul: __ mulss(lreg, raddr); break;
2225
case lir_div: __ divss(lreg, raddr); break;
2226
default: ShouldNotReachHere();
2227
}
2228
}
2229
2230
} else if (left->is_double_xmm()) {
2231
assert(left == dest, "left and dest must be equal");
2232
2233
XMMRegister lreg = left->as_xmm_double_reg();
2234
if (right->is_double_xmm()) {
2235
XMMRegister rreg = right->as_xmm_double_reg();
2236
switch (code) {
2237
case lir_add: __ addsd(lreg, rreg); break;
2238
case lir_sub: __ subsd(lreg, rreg); break;
2239
case lir_mul: __ mulsd(lreg, rreg); break;
2240
case lir_div: __ divsd(lreg, rreg); break;
2241
default: ShouldNotReachHere();
2242
}
2243
} else {
2244
Address raddr;
2245
if (right->is_double_stack()) {
2246
raddr = frame_map()->address_for_slot(right->double_stack_ix());
2247
} else if (right->is_constant()) {
2248
// hack for now
2249
raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
2250
} else {
2251
ShouldNotReachHere();
2252
}
2253
switch (code) {
2254
case lir_add: __ addsd(lreg, raddr); break;
2255
case lir_sub: __ subsd(lreg, raddr); break;
2256
case lir_mul: __ mulsd(lreg, raddr); break;
2257
case lir_div: __ divsd(lreg, raddr); break;
2258
default: ShouldNotReachHere();
2259
}
2260
}
2261
2262
#ifndef _LP64
2263
} else if (left->is_single_fpu()) {
2264
assert(dest->is_single_fpu(), "fpu stack allocation required");
2265
2266
if (right->is_single_fpu()) {
2267
arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack);
2268
2269
} else {
2270
assert(left->fpu_regnr() == 0, "left must be on TOS");
2271
assert(dest->fpu_regnr() == 0, "dest must be on TOS");
2272
2273
Address raddr;
2274
if (right->is_single_stack()) {
2275
raddr = frame_map()->address_for_slot(right->single_stack_ix());
2276
} else if (right->is_constant()) {
2277
address const_addr = float_constant(right->as_jfloat());
2278
assert(const_addr != NULL, "incorrect float/double constant maintenance");
2279
// hack for now
2280
raddr = __ as_Address(InternalAddress(const_addr));
2281
} else {
2282
ShouldNotReachHere();
2283
}
2284
2285
switch (code) {
2286
case lir_add: __ fadd_s(raddr); break;
2287
case lir_sub: __ fsub_s(raddr); break;
2288
case lir_mul: __ fmul_s(raddr); break;
2289
case lir_div: __ fdiv_s(raddr); break;
2290
default: ShouldNotReachHere();
2291
}
2292
}
2293
2294
} else if (left->is_double_fpu()) {
2295
assert(dest->is_double_fpu(), "fpu stack allocation required");
2296
2297
if (code == lir_mul || code == lir_div) {
2298
// Double values require special handling for strictfp mul/div on x86
2299
__ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias1()));
2300
__ fmulp(left->fpu_regnrLo() + 1);
2301
}
2302
2303
if (right->is_double_fpu()) {
2304
arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack);
2305
2306
} else {
2307
assert(left->fpu_regnrLo() == 0, "left must be on TOS");
2308
assert(dest->fpu_regnrLo() == 0, "dest must be on TOS");
2309
2310
Address raddr;
2311
if (right->is_double_stack()) {
2312
raddr = frame_map()->address_for_slot(right->double_stack_ix());
2313
} else if (right->is_constant()) {
2314
// hack for now
2315
raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
2316
} else {
2317
ShouldNotReachHere();
2318
}
2319
2320
switch (code) {
2321
case lir_add: __ fadd_d(raddr); break;
2322
case lir_sub: __ fsub_d(raddr); break;
2323
case lir_mul: __ fmul_d(raddr); break;
2324
case lir_div: __ fdiv_d(raddr); break;
2325
default: ShouldNotReachHere();
2326
}
2327
}
2328
2329
if (code == lir_mul || code == lir_div) {
2330
// Double values require special handling for strictfp mul/div on x86
2331
__ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias2()));
2332
__ fmulp(dest->fpu_regnrLo() + 1);
2333
}
2334
#endif // !_LP64
2335
2336
} else if (left->is_single_stack() || left->is_address()) {
2337
assert(left == dest, "left and dest must be equal");
2338
2339
Address laddr;
2340
if (left->is_single_stack()) {
2341
laddr = frame_map()->address_for_slot(left->single_stack_ix());
2342
} else if (left->is_address()) {
2343
laddr = as_Address(left->as_address_ptr());
2344
} else {
2345
ShouldNotReachHere();
2346
}
2347
2348
if (right->is_single_cpu()) {
2349
Register rreg = right->as_register();
2350
switch (code) {
2351
case lir_add: __ addl(laddr, rreg); break;
2352
case lir_sub: __ subl(laddr, rreg); break;
2353
default: ShouldNotReachHere();
2354
}
2355
} else if (right->is_constant()) {
2356
jint c = right->as_constant_ptr()->as_jint();
2357
switch (code) {
2358
case lir_add: {
2359
__ incrementl(laddr, c);
2360
break;
2361
}
2362
case lir_sub: {
2363
__ decrementl(laddr, c);
2364
break;
2365
}
2366
default: ShouldNotReachHere();
2367
}
2368
} else {
2369
ShouldNotReachHere();
2370
}
2371
2372
} else {
2373
ShouldNotReachHere();
2374
}
2375
}
2376
2377
#ifndef _LP64
2378
void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) {
2379
assert(pop_fpu_stack || (left_index == dest_index || right_index == dest_index), "invalid LIR");
2380
assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR");
2381
assert(left_index == 0 || right_index == 0, "either must be on top of stack");
2382
2383
bool left_is_tos = (left_index == 0);
2384
bool dest_is_tos = (dest_index == 0);
2385
int non_tos_index = (left_is_tos ? right_index : left_index);
2386
2387
switch (code) {
2388
case lir_add:
2389
if (pop_fpu_stack) __ faddp(non_tos_index);
2390
else if (dest_is_tos) __ fadd (non_tos_index);
2391
else __ fadda(non_tos_index);
2392
break;
2393
2394
case lir_sub:
2395
if (left_is_tos) {
2396
if (pop_fpu_stack) __ fsubrp(non_tos_index);
2397
else if (dest_is_tos) __ fsub (non_tos_index);
2398
else __ fsubra(non_tos_index);
2399
} else {
2400
if (pop_fpu_stack) __ fsubp (non_tos_index);
2401
else if (dest_is_tos) __ fsubr (non_tos_index);
2402
else __ fsuba (non_tos_index);
2403
}
2404
break;
2405
2406
case lir_mul:
2407
if (pop_fpu_stack) __ fmulp(non_tos_index);
2408
else if (dest_is_tos) __ fmul (non_tos_index);
2409
else __ fmula(non_tos_index);
2410
break;
2411
2412
case lir_div:
2413
if (left_is_tos) {
2414
if (pop_fpu_stack) __ fdivrp(non_tos_index);
2415
else if (dest_is_tos) __ fdiv (non_tos_index);
2416
else __ fdivra(non_tos_index);
2417
} else {
2418
if (pop_fpu_stack) __ fdivp (non_tos_index);
2419
else if (dest_is_tos) __ fdivr (non_tos_index);
2420
else __ fdiva (non_tos_index);
2421
}
2422
break;
2423
2424
case lir_rem:
2425
assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation");
2426
__ fremr(noreg);
2427
break;
2428
2429
default:
2430
ShouldNotReachHere();
2431
}
2432
}
2433
#endif // !_LP64
2434
2435
2436
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
2437
if (value->is_double_xmm()) {
2438
switch(code) {
2439
case lir_abs :
2440
{
2441
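// fabs is just a bit operation: clear the sign bit. The generic path ands the value
// with the 128-bit double_signmask_pool constant; the AVX-512 variant achieves the
// same with vpandn and a sign-bit mask held in the temp register.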
#ifdef _LP64
2442
if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
2443
assert(tmp->is_valid(), "need temporary");
2444
__ vpandn(dest->as_xmm_double_reg(), tmp->as_xmm_double_reg(), value->as_xmm_double_reg(), 2);
2445
} else
2446
#endif
2447
{
2448
if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
2449
__ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
2450
}
2451
assert(!tmp->is_valid(), "do not need temporary");
2452
__ andpd(dest->as_xmm_double_reg(),
2453
ExternalAddress((address)double_signmask_pool));
2454
}
2455
}
2456
break;
2457
2458
case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
2459
// all other intrinsics are not available in the SSE instruction set, so FPU is used
2460
default : ShouldNotReachHere();
2461
}
2462
2463
#ifndef _LP64
2464
} else if (value->is_double_fpu()) {
2465
assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS");
2466
switch(code) {
2467
case lir_abs : __ fabs() ; break;
2468
case lir_sqrt : __ fsqrt(); break;
2469
default : ShouldNotReachHere();
2470
}
2471
#endif // !_LP64
2472
} else {
2473
Unimplemented();
2474
}
2475
}
2476
2477
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
2478
// assert(left->destroys_register(), "check");
2479
if (left->is_single_cpu()) {
2480
Register reg = left->as_register();
2481
if (right->is_constant()) {
2482
int val = right->as_constant_ptr()->as_jint();
2483
switch (code) {
2484
case lir_logic_and: __ andl (reg, val); break;
2485
case lir_logic_or: __ orl (reg, val); break;
2486
case lir_logic_xor: __ xorl (reg, val); break;
2487
default: ShouldNotReachHere();
2488
}
2489
} else if (right->is_stack()) {
2490
// added support for stack operands
2491
Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
2492
switch (code) {
2493
case lir_logic_and: __ andl (reg, raddr); break;
2494
case lir_logic_or: __ orl (reg, raddr); break;
2495
case lir_logic_xor: __ xorl (reg, raddr); break;
2496
default: ShouldNotReachHere();
2497
}
2498
} else {
2499
Register rright = right->as_register();
2500
switch (code) {
2501
case lir_logic_and: __ andptr (reg, rright); break;
2502
case lir_logic_or : __ orptr (reg, rright); break;
2503
case lir_logic_xor: __ xorptr (reg, rright); break;
2504
default: ShouldNotReachHere();
2505
}
2506
}
2507
move_regs(reg, dst->as_register());
2508
} else {
2509
Register l_lo = left->as_register_lo();
2510
Register l_hi = left->as_register_hi();
2511
if (right->is_constant()) {
2512
#ifdef _LP64
2513
__ mov64(rscratch1, right->as_constant_ptr()->as_jlong());
2514
switch (code) {
2515
case lir_logic_and:
2516
__ andq(l_lo, rscratch1);
2517
break;
2518
case lir_logic_or:
2519
__ orq(l_lo, rscratch1);
2520
break;
2521
case lir_logic_xor:
2522
__ xorq(l_lo, rscratch1);
2523
break;
2524
default: ShouldNotReachHere();
2525
}
2526
#else
2527
int r_lo = right->as_constant_ptr()->as_jint_lo();
2528
int r_hi = right->as_constant_ptr()->as_jint_hi();
2529
switch (code) {
2530
case lir_logic_and:
2531
__ andl(l_lo, r_lo);
2532
__ andl(l_hi, r_hi);
2533
break;
2534
case lir_logic_or:
2535
__ orl(l_lo, r_lo);
2536
__ orl(l_hi, r_hi);
2537
break;
2538
case lir_logic_xor:
2539
__ xorl(l_lo, r_lo);
2540
__ xorl(l_hi, r_hi);
2541
break;
2542
default: ShouldNotReachHere();
2543
}
2544
#endif // _LP64
2545
} else {
2546
#ifdef _LP64
2547
Register r_lo;
2548
if (is_reference_type(right->type())) {
2549
r_lo = right->as_register();
2550
} else {
2551
r_lo = right->as_register_lo();
2552
}
2553
#else
2554
Register r_lo = right->as_register_lo();
2555
Register r_hi = right->as_register_hi();
2556
assert(l_lo != r_hi, "overwriting registers");
2557
#endif
2558
switch (code) {
2559
case lir_logic_and:
2560
__ andptr(l_lo, r_lo);
2561
NOT_LP64(__ andptr(l_hi, r_hi);)
2562
break;
2563
case lir_logic_or:
2564
__ orptr(l_lo, r_lo);
2565
NOT_LP64(__ orptr(l_hi, r_hi);)
2566
break;
2567
case lir_logic_xor:
2568
__ xorptr(l_lo, r_lo);
2569
NOT_LP64(__ xorptr(l_hi, r_hi);)
2570
break;
2571
default: ShouldNotReachHere();
2572
}
2573
}
2574
2575
Register dst_lo = dst->as_register_lo();
2576
Register dst_hi = dst->as_register_hi();
2577
2578
#ifdef _LP64
2579
move_regs(l_lo, dst_lo);
2580
#else
2581
if (dst_lo == l_hi) {
2582
assert(dst_hi != l_lo, "overwriting registers");
2583
move_regs(l_hi, dst_hi);
2584
move_regs(l_lo, dst_lo);
2585
} else {
2586
assert(dst_lo != l_hi, "overwriting registers");
2587
move_regs(l_lo, dst_lo);
2588
move_regs(l_hi, dst_hi);
2589
}
2590
#endif // _LP64
2591
}
2592
}
2593
2594
2595
// we assume that rax, and rdx can be overwritten
2596
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
2597
2598
assert(left->is_single_cpu(), "left must be register");
2599
assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");
2600
assert(result->is_single_cpu(), "result must be register");
2601
2602
// assert(left->destroys_register(), "check");
2603
// assert(right->destroys_register(), "check");
2604
2605
Register lreg = left->as_register();
2606
Register dreg = result->as_register();
2607
2608
if (right->is_constant()) {
2609
jint divisor = right->as_constant_ptr()->as_jint();
2610
assert(divisor > 0 && is_power_of_2(divisor), "must be");
2611
if (code == lir_idiv) {
2612
assert(lreg == rax, "must be rax,");
2613
assert(temp->as_register() == rdx, "tmp register must be rdx");
2614
__ cdql(); // sign extend into rdx:rax
2615
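// An arithmetic right shift rounds toward negative infinity, so for a negative dividend
// add (divisor - 1) first (after cdql, rdx is 0 or -1, i.e. all sign bits) to make the
// shift truncate toward zero as idiv would. For divisor == 2 this is just "subtract rdx".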
if (divisor == 2) {
2616
__ subl(lreg, rdx);
2617
} else {
2618
__ andl(rdx, divisor - 1);
2619
__ addl(lreg, rdx);
2620
}
2621
__ sarl(lreg, log2i_exact(divisor));
2622
move_regs(lreg, dreg);
2623
} else if (code == lir_irem) {
2624
Label done;
2625
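// Remainder for a power-of-two divisor: keep the low bits together with the sign bit.
// A non-negative dividend is done immediately; a negative one is fixed up (dec/or/inc)
// so that a non-zero remainder comes out negative, as the JLS requires for irem.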
__ mov(dreg, lreg);
2626
__ andl(dreg, 0x80000000 | (divisor - 1));
2627
__ jcc(Assembler::positive, done);
2628
__ decrement(dreg);
2629
__ orl(dreg, ~(divisor - 1));
2630
__ increment(dreg);
2631
__ bind(done);
2632
} else {
2633
ShouldNotReachHere();
2634
}
2635
} else {
2636
Register rreg = right->as_register();
2637
assert(lreg == rax, "left register must be rax,");
2638
assert(rreg != rdx, "right register must not be rdx");
2639
assert(temp->as_register() == rdx, "tmp register must be rdx");
2640
2641
move_regs(lreg, rax);
2642
2643
int idivl_offset = __ corrected_idivl(rreg);
2644
if (ImplicitDiv0Checks) {
2645
add_debug_info_for_div0(idivl_offset, info);
2646
}
2647
if (code == lir_irem) {
2648
move_regs(rdx, dreg); // result is in rdx
2649
} else {
2650
move_regs(rax, dreg);
2651
}
2652
}
2653
}
2654
2655
2656
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
2657
if (opr1->is_single_cpu()) {
2658
Register reg1 = opr1->as_register();
2659
if (opr2->is_single_cpu()) {
2660
// cpu register - cpu register
2661
if (is_reference_type(opr1->type())) {
2662
__ cmpoop(reg1, opr2->as_register());
2663
} else {
2664
assert(!is_reference_type(opr2->type()), "cmp int, oop?");
2665
__ cmpl(reg1, opr2->as_register());
2666
}
2667
} else if (opr2->is_stack()) {
2668
// cpu register - stack
2669
if (is_reference_type(opr1->type())) {
2670
__ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2671
} else {
2672
__ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2673
}
2674
} else if (opr2->is_constant()) {
2675
// cpu register - constant
2676
LIR_Const* c = opr2->as_constant_ptr();
2677
if (c->type() == T_INT) {
2678
__ cmpl(reg1, c->as_jint());
2679
} else if (c->type() == T_METADATA) {
2680
// All we need for now is a comparison with NULL for equality.
2681
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
2682
Metadata* m = c->as_metadata();
2683
if (m == NULL) {
2684
__ cmpptr(reg1, (int32_t)0);
2685
} else {
2686
ShouldNotReachHere();
2687
}
2688
} else if (is_reference_type(c->type())) {
2689
// In 64bit oops are single register
2690
jobject o = c->as_jobject();
2691
if (o == NULL) {
2692
__ cmpptr(reg1, (int32_t)NULL_WORD);
2693
} else {
2694
__ cmpoop(reg1, o);
2695
}
2696
} else {
2697
fatal("unexpected type: %s", basictype_to_str(c->type()));
2698
}
2699
// cpu register - address
2700
} else if (opr2->is_address()) {
2701
if (op->info() != NULL) {
2702
add_debug_info_for_null_check_here(op->info());
2703
}
2704
__ cmpl(reg1, as_Address(opr2->as_address_ptr()));
2705
} else {
2706
ShouldNotReachHere();
2707
}
2708
2709
} else if(opr1->is_double_cpu()) {
2710
Register xlo = opr1->as_register_lo();
2711
Register xhi = opr1->as_register_hi();
2712
if (opr2->is_double_cpu()) {
2713
#ifdef _LP64
2714
__ cmpptr(xlo, opr2->as_register_lo());
2715
#else
2716
// cpu register - cpu register
2717
Register ylo = opr2->as_register_lo();
2718
Register yhi = opr2->as_register_hi();
2719
__ subl(xlo, ylo);
2720
__ sbbl(xhi, yhi);
2721
if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
2722
__ orl(xhi, xlo);
2723
}
2724
#endif // _LP64
2725
} else if (opr2->is_constant()) {
2726
// cpu register - constant 0
2727
assert(opr2->as_jlong() == (jlong)0, "only handles zero");
2728
#ifdef _LP64
2729
__ cmpptr(xlo, (int32_t)opr2->as_jlong());
2730
#else
2731
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case");
2732
__ orl(xhi, xlo);
2733
#endif // _LP64
2734
} else {
2735
ShouldNotReachHere();
2736
}
2737
2738
} else if (opr1->is_single_xmm()) {
2739
XMMRegister reg1 = opr1->as_xmm_float_reg();
2740
if (opr2->is_single_xmm()) {
2741
// xmm register - xmm register
2742
__ ucomiss(reg1, opr2->as_xmm_float_reg());
2743
} else if (opr2->is_stack()) {
2744
// xmm register - stack
2745
__ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2746
} else if (opr2->is_constant()) {
2747
// xmm register - constant
2748
__ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
2749
} else if (opr2->is_address()) {
2750
// xmm register - address
2751
if (op->info() != NULL) {
2752
add_debug_info_for_null_check_here(op->info());
2753
}
2754
__ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
2755
} else {
2756
ShouldNotReachHere();
2757
}
2758
2759
} else if (opr1->is_double_xmm()) {
2760
XMMRegister reg1 = opr1->as_xmm_double_reg();
2761
if (opr2->is_double_xmm()) {
2762
// xmm register - xmm register
2763
__ ucomisd(reg1, opr2->as_xmm_double_reg());
2764
} else if (opr2->is_stack()) {
2765
// xmm register - stack
2766
__ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix()));
2767
} else if (opr2->is_constant()) {
2768
// xmm register - constant
2769
__ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
2770
} else if (opr2->is_address()) {
2771
// xmm register - address
2772
if (op->info() != NULL) {
2773
add_debug_info_for_null_check_here(op->info());
2774
}
2775
__ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
2776
} else {
2777
ShouldNotReachHere();
2778
}
2779
2780
#ifndef _LP64
2781
} else if(opr1->is_single_fpu() || opr1->is_double_fpu()) {
2782
assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");
2783
assert(opr2->is_fpu_register(), "both must be registers");
2784
__ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2785
#endif // !_LP64
2786
2787
} else if (opr1->is_address() && opr2->is_constant()) {
2788
LIR_Const* c = opr2->as_constant_ptr();
2789
#ifdef _LP64
2790
if (is_reference_type(c->type())) {
2791
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
2792
__ movoop(rscratch1, c->as_jobject());
2793
}
2794
#endif // _LP64
2795
if (op->info() != NULL) {
2796
add_debug_info_for_null_check_here(op->info());
2797
}
2798
// special case: address - constant
2799
LIR_Address* addr = opr1->as_address_ptr();
2800
if (c->type() == T_INT) {
2801
__ cmpl(as_Address(addr), c->as_jint());
2802
} else if (is_reference_type(c->type())) {
2803
#ifdef _LP64
2804
// %%% Make this explode if addr isn't reachable until we figure out a
2805
// better strategy by giving noreg as the temp for as_Address
2806
__ cmpoop(rscratch1, as_Address(addr, noreg));
2807
#else
2808
__ cmpoop(as_Address(addr), c->as_jobject());
2809
#endif // _LP64
2810
} else {
2811
ShouldNotReachHere();
2812
}
2813
2814
} else {
2815
ShouldNotReachHere();
2816
}
2817
}
2818
2819
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
2820
if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2821
if (left->is_single_xmm()) {
2822
assert(right->is_single_xmm(), "must match");
2823
__ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);
2824
} else if (left->is_double_xmm()) {
2825
assert(right->is_double_xmm(), "must match");
2826
__ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);
2827
2828
} else {
2829
#ifdef _LP64
2830
ShouldNotReachHere();
2831
#else
2832
assert(left->is_single_fpu() || left->is_double_fpu(), "must be");
2833
assert(right->is_single_fpu() || right->is_double_fpu(), "must match");
2834
2835
assert(left->fpu() == 0, "left must be on TOS");
2836
__ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(),
2837
op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2838
#endif // _LP64
2839
}
2840
} else {
2841
assert(code == lir_cmp_l2i, "check");
2842
#ifdef _LP64
2843
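// lcmp on 64 bit: produce -1/0/+1 in dest. Start with -1; if left < right we are done,
// otherwise set_byte_if_not_zero (setne) yields 1 for "greater" and 0 for "equal".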
Label done;
2844
Register dest = dst->as_register();
2845
__ cmpptr(left->as_register_lo(), right->as_register_lo());
2846
__ movl(dest, -1);
2847
__ jccb(Assembler::less, done);
2848
__ set_byte_if_not_zero(dest);
2849
__ movzbl(dest, dest);
2850
__ bind(done);
2851
#else
2852
__ lcmp2int(left->as_register_hi(),
2853
left->as_register_lo(),
2854
right->as_register_hi(),
2855
right->as_register_lo());
2856
move_regs(left->as_register_hi(), dst->as_register());
2857
#endif // _LP64
2858
}
2859
}
2860
2861
2862
void LIR_Assembler::align_call(LIR_Code code) {
2863
// make sure that the displacement word of the call ends up word aligned
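// (the 32-bit displacement must not straddle a word boundary so that the call site can
// later be patched atomically while other threads may be executing it)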
2864
int offset = __ offset();
2865
switch (code) {
2866
case lir_static_call:
2867
case lir_optvirtual_call:
2868
case lir_dynamic_call:
2869
offset += NativeCall::displacement_offset;
2870
break;
2871
case lir_icvirtual_call:
2872
offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2873
break;
2874
default: ShouldNotReachHere();
2875
}
2876
__ align(BytesPerWord, offset);
2877
}
2878
2879
2880
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2881
assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2882
"must be aligned");
2883
__ call(AddressLiteral(op->addr(), rtype));
2884
add_call_info(code_offset(), op->info());
2885
}
2886
2887
2888
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2889
__ ic_call(op->addr());
2890
add_call_info(code_offset(), op->info());
2891
assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2892
"must be aligned");
2893
}
2894
2895
2896
void LIR_Assembler::emit_static_call_stub() {
2897
address call_pc = __ pc();
2898
address stub = __ start_a_stub(call_stub_size());
2899
if (stub == NULL) {
2900
bailout("static call stub overflow");
2901
return;
2902
}
2903
2904
int start = __ offset();
2905
2906
// make sure that the displacement word of the call ends up word aligned
2907
__ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
2908
__ relocate(static_stub_Relocation::spec(call_pc));
2909
__ mov_metadata(rbx, (Metadata*)NULL);
2910
// must be set to -1 at code generation time
2911
assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
2912
// On 64bit this will die since it will take a movq & jmp, must be only a jmp
2913
__ jump(RuntimeAddress(__ pc()));
2914
2915
assert(__ offset() - start <= call_stub_size(), "stub too big");
2916
__ end_a_stub();
2917
}
2918
2919
2920
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2921
assert(exceptionOop->as_register() == rax, "must match");
2922
assert(exceptionPC->as_register() == rdx, "must match");
2923
2924
// exception object is not added to oop map by LinearScan
2925
// (LinearScan assumes that no oops are in fixed registers)
2926
info->add_register_oop(exceptionOop);
2927
Runtime1::StubID unwind_id;
2928
2929
// get current pc information
2930
// pc is only needed if the method has an exception handler, the unwind code does not need it.
2931
int pc_for_athrow_offset = __ offset();
2932
InternalAddress pc_for_athrow(__ pc());
2933
__ lea(exceptionPC->as_register(), pc_for_athrow);
2934
add_call_info(pc_for_athrow_offset, info); // for exception handler
2935
2936
__ verify_not_null_oop(rax);
2937
// search an exception handler (rax: exception oop, rdx: throwing pc)
2938
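// the _nofpu variant of the runtime stub skips saving and restoring FPU/XMM state,
// which is safe when the compiled method contains no FPU code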
if (compilation()->has_fpu_code()) {
2939
unwind_id = Runtime1::handle_exception_id;
2940
} else {
2941
unwind_id = Runtime1::handle_exception_nofpu_id;
2942
}
2943
__ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2944
2945
// enough room for two byte trap
2946
__ nop();
2947
}
2948
2949
2950
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2951
assert(exceptionOop->as_register() == rax, "must match");
2952
2953
__ jmp(_unwind_handler_entry);
2954
}
2955
2956
2957
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2958
2959
// optimized version for linear scan:
2960
// * count must already be in ECX (guaranteed by LinearScan)
2961
// * left and dest must be equal
2962
// * tmp must be unused
2963
assert(count->as_register() == SHIFT_count, "count must be in ECX");
2964
assert(left == dest, "left and dest must be equal");
2965
assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2966
2967
if (left->is_single_cpu()) {
2968
Register value = left->as_register();
2969
assert(value != SHIFT_count, "left cannot be ECX");
2970
2971
switch (code) {
2972
case lir_shl: __ shll(value); break;
2973
case lir_shr: __ sarl(value); break;
2974
case lir_ushr: __ shrl(value); break;
2975
default: ShouldNotReachHere();
2976
}
2977
} else if (left->is_double_cpu()) {
2978
Register lo = left->as_register_lo();
2979
Register hi = left->as_register_hi();
2980
assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
2981
#ifdef _LP64
2982
switch (code) {
2983
case lir_shl: __ shlptr(lo); break;
2984
case lir_shr: __ sarptr(lo); break;
2985
case lir_ushr: __ shrptr(lo); break;
2986
default: ShouldNotReachHere();
2987
}
2988
#else
2989
2990
switch (code) {
2991
case lir_shl: __ lshl(hi, lo); break;
2992
case lir_shr: __ lshr(hi, lo, true); break;
2993
case lir_ushr: __ lshr(hi, lo, false); break;
2994
default: ShouldNotReachHere();
2995
}
2996
#endif // _LP64
2997
} else {
2998
ShouldNotReachHere();
2999
}
3000
}
3001
3002
3003
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
3004
if (dest->is_single_cpu()) {
3005
// first move left into dest so that left is not destroyed by the shift
3006
Register value = dest->as_register();
3007
count = count & 0x1F; // Java spec
3008
3009
move_regs(left->as_register(), value);
3010
switch (code) {
3011
case lir_shl: __ shll(value, count); break;
3012
case lir_shr: __ sarl(value, count); break;
3013
case lir_ushr: __ shrl(value, count); break;
3014
default: ShouldNotReachHere();
3015
}
3016
} else if (dest->is_double_cpu()) {
3017
#ifndef _LP64
3018
Unimplemented();
3019
#else
3020
// first move left into dest so that left is not destroyed by the shift
3021
Register value = dest->as_register_lo();
3022
count = count & 0x1F; // Java spec
3023
3024
move_regs(left->as_register_lo(), value);
3025
switch (code) {
3026
case lir_shl: __ shlptr(value, count); break;
3027
case lir_shr: __ sarptr(value, count); break;
3028
case lir_ushr: __ shrptr(value, count); break;
3029
default: ShouldNotReachHere();
3030
}
3031
#endif // _LP64
3032
} else {
3033
ShouldNotReachHere();
3034
}
3035
}
3036
3037
3038
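// The store_parameter() helpers below spill outgoing arguments for runtime stubs into
// the reserved argument area at the bottom of the frame (rsp-relative), whose size is
// given by FrameMap::reserved_argument_area_size().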
void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
3039
assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3040
int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3041
assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3042
__ movptr (Address(rsp, offset_from_rsp_in_bytes), r);
3043
}
3044
3045
3046
void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
3047
assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3048
int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3049
assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3050
__ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
3051
}
3052
3053
3054
void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
3055
assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3056
int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3057
assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3058
__ movoop (Address(rsp, offset_from_rsp_in_bytes), o);
3059
}
3060
3061
3062
void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
3063
assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3064
int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3065
assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3066
__ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m);
3067
}
3068
3069
3070
// This code replaces a call to arraycopy; no exception may
3071
// be thrown in this code; they must be thrown in the System.arraycopy
3072
// activation frame; we could save some checks if this would not be the case
3073
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3074
ciArrayKlass* default_type = op->expected_type();
3075
Register src = op->src()->as_register();
3076
Register dst = op->dst()->as_register();
3077
Register src_pos = op->src_pos()->as_register();
3078
Register dst_pos = op->dst_pos()->as_register();
3079
Register length = op->length()->as_register();
3080
Register tmp = op->tmp()->as_register();
3081
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3082
3083
CodeStub* stub = op->stub();
3084
int flags = op->flags();
3085
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
3086
if (is_reference_type(basic_type)) basic_type = T_OBJECT;
3087
3088
// if we don't know anything, just go through the generic arraycopy
3089
if (default_type == NULL) {
3090
// save outgoing arguments on stack in case call to System.arraycopy is needed
3091
// HACK ALERT. This code used to push the parameters in a hardwired fashion
3092
// for interpreter calling conventions. Now we have to do it in new style conventions.
3093
// For the moment until C1 gets the new register allocator I just force all the
3094
// args to the right place (except the register args) and then on the back side
3095
// reload the register args properly if we go slow path. Yuck
3096
3097
// These are proper for the calling convention
3098
store_parameter(length, 2);
3099
store_parameter(dst_pos, 1);
3100
store_parameter(dst, 0);
3101
3102
// these are just temporary placements until we need to reload
3103
store_parameter(src_pos, 3);
3104
store_parameter(src, 4);
3105
NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3106
3107
address copyfunc_addr = StubRoutines::generic_arraycopy();
3108
assert(copyfunc_addr != NULL, "generic arraycopy stub required");
3109
3110
// pass arguments: may push as this is not a safepoint; SP must be fixed at each safepoint
3111
#ifdef _LP64
3112
// The arguments are in java calling convention so we can trivially shift them to C
3113
// convention
3114
assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
3115
__ mov(c_rarg0, j_rarg0);
3116
assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
3117
__ mov(c_rarg1, j_rarg1);
3118
assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
3119
__ mov(c_rarg2, j_rarg2);
3120
assert_different_registers(c_rarg3, j_rarg4);
3121
__ mov(c_rarg3, j_rarg3);
3122
#ifdef _WIN64
3123
// Allocate abi space for args but be sure to keep stack aligned
3124
__ subptr(rsp, 6*wordSize);
3125
store_parameter(j_rarg4, 4);
3126
#ifndef PRODUCT
3127
if (PrintC1Statistics) {
3128
__ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3129
}
3130
#endif
3131
__ call(RuntimeAddress(copyfunc_addr));
3132
__ addptr(rsp, 6*wordSize);
3133
#else
3134
__ mov(c_rarg4, j_rarg4);
3135
#ifndef PRODUCT
3136
if (PrintC1Statistics) {
3137
__ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3138
}
3139
#endif
3140
__ call(RuntimeAddress(copyfunc_addr));
3141
#endif // _WIN64
3142
#else
3143
__ push(length);
3144
__ push(dst_pos);
3145
__ push(dst);
3146
__ push(src_pos);
3147
__ push(src);
3148
3149
#ifndef PRODUCT
3150
if (PrintC1Statistics) {
3151
__ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3152
}
3153
#endif
3154
__ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack
3155
3156
#endif // _LP64
3157
3158
__ cmpl(rax, 0);
3159
__ jcc(Assembler::equal, *stub->continuation());
3160
3161
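// The generic arraycopy stub returns 0 on success or ~(number of elements already
// copied) on failure; tmp = ~rax recovers that count so the positions and the length
// can be adjusted before falling back to the slow path.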
__ mov(tmp, rax);
3162
__ xorl(tmp, -1);
3163
3164
// Reload values from the stack so they are where the stub
3165
// expects them.
3166
__ movptr (dst, Address(rsp, 0*BytesPerWord));
3167
__ movptr (dst_pos, Address(rsp, 1*BytesPerWord));
3168
__ movptr (length, Address(rsp, 2*BytesPerWord));
3169
__ movptr (src_pos, Address(rsp, 3*BytesPerWord));
3170
__ movptr (src, Address(rsp, 4*BytesPerWord));
3171
3172
__ subl(length, tmp);
3173
__ addl(src_pos, tmp);
3174
__ addl(dst_pos, tmp);
3175
__ jmp(*stub->entry());
3176
3177
__ bind(*stub->continuation());
3178
return;
3179
}
3180
3181
assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
3182
3183
int elem_size = type2aelembytes(basic_type);
3184
Address::ScaleFactor scale;
3185
3186
switch (elem_size) {
3187
case 1 :
3188
scale = Address::times_1;
3189
break;
3190
case 2 :
3191
scale = Address::times_2;
3192
break;
3193
case 4 :
3194
scale = Address::times_4;
3195
break;
3196
case 8 :
3197
scale = Address::times_8;
3198
break;
3199
default:
3200
scale = Address::no_scale;
3201
ShouldNotReachHere();
3202
}
3203
3204
Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
3205
Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
3206
Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
3207
Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
3208
3209
// length and the positions are all sign-extended at this point on 64 bit
3210
3211
// test for NULL
3212
if (flags & LIR_OpArrayCopy::src_null_check) {
3213
__ testptr(src, src);
3214
__ jcc(Assembler::zero, *stub->entry());
3215
}
3216
if (flags & LIR_OpArrayCopy::dst_null_check) {
3217
__ testptr(dst, dst);
3218
__ jcc(Assembler::zero, *stub->entry());
3219
}
3220
3221
// If the compiler was not able to prove that exact type of the source or the destination
3222
// of the arraycopy is an array type, check at runtime if the source or the destination is
3223
// an instance type.
3224
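// Array klasses have a negative layout helper, so a layout helper >= _lh_neutral_value
// identifies a non-array klass and sends us to the slow path.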
if (flags & LIR_OpArrayCopy::type_check) {
3225
if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
3226
__ load_klass(tmp, dst, tmp_load_klass);
3227
__ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
3228
__ jcc(Assembler::greaterEqual, *stub->entry());
3229
}
3230
3231
if (!(flags & LIR_OpArrayCopy::src_objarray)) {
3232
__ load_klass(tmp, src, tmp_load_klass);
3233
__ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
3234
__ jcc(Assembler::greaterEqual, *stub->entry());
3235
}
3236
}
3237
3238
// check if negative
3239
if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
3240
__ testl(src_pos, src_pos);
3241
__ jcc(Assembler::less, *stub->entry());
3242
}
3243
if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
3244
__ testl(dst_pos, dst_pos);
3245
__ jcc(Assembler::less, *stub->entry());
3246
}
3247
3248
if (flags & LIR_OpArrayCopy::src_range_check) {
3249
__ lea(tmp, Address(src_pos, length, Address::times_1, 0));
3250
__ cmpl(tmp, src_length_addr);
3251
__ jcc(Assembler::above, *stub->entry());
3252
}
3253
if (flags & LIR_OpArrayCopy::dst_range_check) {
3254
__ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
3255
__ cmpl(tmp, dst_length_addr);
3256
__ jcc(Assembler::above, *stub->entry());
3257
}
3258
3259
if (flags & LIR_OpArrayCopy::length_positive_check) {
3260
__ testl(length, length);
3261
__ jcc(Assembler::less, *stub->entry());
3262
}
3263
3264
#ifdef _LP64
3265
__ movl2ptr(src_pos, src_pos); // higher 32 bits must be zero
3266
__ movl2ptr(dst_pos, dst_pos); // higher 32 bits must be zero
3267
#endif
3268
3269
  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      if (UseCompressedClassPointers) {
        __ movl(tmp, src_klass_addr);
        __ cmpl(tmp, dst_klass_addr);
      } else {
        __ movptr(tmp, src_klass_addr);
        __ cmpptr(tmp, dst_klass_addr);
      }
      __ jcc(Assembler::notEqual, *stub->entry());
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      Label cont, slow;

      __ push(src);
      __ push(dst);

      __ load_klass(src, src, tmp_load_klass);
      __ load_klass(dst, dst, tmp_load_klass);

      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);

      __ push(src);
      __ push(dst);
      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ pop(dst);
      __ pop(src);

      __ cmpl(src, 0);
      __ jcc(Assembler::notEqual, cont);

      __ bind(slow);
      __ pop(dst);
      __ pop(src);

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
      if (copyfunc_addr != NULL) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // Check that both of them are object arrays.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src, tmp_load_klass);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst, tmp_load_klass);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());
          Address klass_lh_addr(tmp, lh_offset);
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ cmpl(klass_lh_addr, objArray_lh);
          __ jcc(Assembler::notEqual, *stub->entry());
        }

        // Spill because stubs can use any register they like and it's
        // easier to restore just those that we care about.
        store_parameter(dst, 0);
        store_parameter(dst_pos, 1);
        store_parameter(length, 2);
        store_parameter(src_pos, 3);
        store_parameter(src, 4);

#ifndef _LP64
        __ movptr(tmp, dst_klass_addr);
        __ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset()));
        __ push(tmp);
        __ movl(tmp, Address(tmp, Klass::super_check_offset_offset()));
        __ push(tmp);
        __ push(length);
        __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        __ push(tmp);
        __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        __ push(tmp);

        __ call_VM_leaf(copyfunc_addr, 5);
#else
        __ movl2ptr(length, length); // higher 32 bits must be zero

        __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg0, dst, dst_pos, length);
        __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg1, dst, length);

        __ mov(c_rarg2, length);
        assert_different_registers(c_rarg2, dst);

#ifdef _WIN64
        // Allocate ABI space for args but be sure to keep the stack aligned
        __ subptr(rsp, 6*wordSize);
        __ load_klass(c_rarg3, dst, tmp_load_klass);
        __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
        store_parameter(c_rarg3, 4);
        __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
        __ addptr(rsp, 6*wordSize);
#else
        __ load_klass(c_rarg4, dst, tmp_load_klass);
        __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
        __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
#endif

#endif

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ testl(rax, rax);
          __ jcc(Assembler::notZero, failed);
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
          __ bind(failed);
        }
#endif

        __ testl(rax, rax);
        __ jcc(Assembler::zero, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
        }
#endif

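        // rax is zero when the stub copied every element; on a partial copy it
        // holds the bitwise complement of the number of elements copied before
        // the element type check failed (see StubRoutines::checkcast_arraycopy).
        // Recover that count (tmp = ~rax) so the spilled length/positions can be
        // advanced past the copied prefix before falling back to the slow path.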
        __ mov(tmp, rax);

        __ xorl(tmp, -1);

        // Restore previously spilled arguments
        __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
        __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
        __ movptr   (length,  Address(rsp, 2*BytesPerWord));
        __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
        __ movptr   (src,     Address(rsp, 4*BytesPerWord));

        __ subl(length, tmp);
        __ addl(src_pos, tmp);
        __ addl(dst_pos, tmp);
      }

      __ jmp(*stub->entry());

      __ bind(cont);
      __ pop(dst);
      __ pop(src);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp, rscratch1);
    }
#endif

    if (basic_type != T_OBJECT) {
      if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
      else                            __ cmpptr(tmp, dst_klass_addr);
      __ jcc(Assembler::notEqual, halt);
      if (UseCompressedClassPointers) __ cmpl(tmp, src_klass_addr);
      else                            __ cmpptr(tmp, src_klass_addr);
      __ jcc(Assembler::equal, known_ok);
    } else {
      if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
      else                            __ cmpptr(tmp, dst_klass_addr);
      __ jcc(Assembler::equal, known_ok);
      __ cmpptr(src, dst);
      __ jcc(Assembler::equal, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
  }
#endif

#ifdef _LP64
  assert_different_registers(c_rarg0, dst, dst_pos, length);
  __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  assert_different_registers(c_rarg1, length);
  __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  __ mov(c_rarg2, length);

#else
  __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 0);
  __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 1);
  store_parameter(length, 2);
#endif // _LP64

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
  __ call_VM_leaf(entry, 0);

  __ bind(*stub->continuation());
}

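// Update a CRC-32 value with a single byte. The table-driven update works on
// the bitwise complement of the stored CRC, so the incoming value is inverted,
// updated via the lookup table at StubRoutines::crc_table_addr(), and then
// inverted back before being returned.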
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);

  __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
  __ notl(crc); // ~crc
  __ update_byte_crc32(crc, val, res);
  __ notl(crc); // ~crc
  __ mov(res, crc);
}

void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (!UseFastLocking) {
    __ jmp(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      scratch = op->scratch_opr()->as_register();
    }
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}

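// Profile a call site. The invocation counter in the MethodData is always
// bumped; for virtual and interface calls the receiver klass is additionally
// recorded in one of the VirtualCallData rows, or the plain counter is bumped
// once all rows are occupied (polymorphic case).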
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(recv_addr, known_klass->constant_encoding());
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv, tmp_load_klass);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ addptr(counter_addr, DataLayout::counter_increment);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ addptr(counter_addr, DataLayout::counter_increment);
  }
}

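// Record the observed type of a value in its MethodData slot. The slot packs a
// klass pointer together with flag bits (TypeEntries::null_seen and
// TypeEntries::type_unknown); the code below either marks null_seen, installs
// the klass the first time a type is seen, or degrades the entry to
// type_unknown once a conflicting klass shows up.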
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

  if (tmp != obj) {
    __ mov(tmp, obj);
  }
  if (do_null) {
    __ testptr(tmp, tmp);
    __ jccb(Assembler::notZero, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ orptr(mdo_addr, TypeEntries::null_seen);
    }
    if (do_update) {
#ifndef ASSERT
      __ jmpb(next);
    }
#else
      __ jmp(next);
    }
  } else {
    __ testptr(tmp, tmp);
    __ jcc(Assembler::notZero, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      __ load_klass(tmp, tmp, tmp_load_klass);
      __ push(tmp);
      __ mov_metadata(tmp, exact_klass->constant_encoding());
      __ cmpptr(tmp, Address(rsp, 0));
      __ jcc(Assembler::equal, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
      __ pop(tmp);
    }
#endif
    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != NULL) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, tmp, tmp_load_klass);
        }

        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ jccb(Assembler::zero, next);

        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        if (TypeEntries::is_type_none(current_klass)) {
          __ cmpptr(mdo_addr, 0);
          __ jccb(Assembler::equal, none);
          __ cmpptr(mdo_addr, TypeEntries::null_seen);
          __ jccb(Assembler::equal, none);
          // There is a chance that the checks above (re-reading profiling
          // data from memory) fail if another thread has just set the
          // profiling to this obj's klass
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_klass_mask);
          __ jccb(Assembler::zero, next);
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ movptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
      }

      // Different from before. Cannot keep an accurate profile.
      __ orptr(mdo_addr, TypeEntries::type_unknown);

      if (TypeEntries::is_type_none(current_klass)) {
        __ jmpb(next);

        __ bind(none);
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
      }
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != NULL, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        __ mov_metadata(tmp, exact_klass->constant_encoding());
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
#ifdef ASSERT
        __ jcc(Assembler::zero, next);

        {
          Label ok;
          __ push(tmp);
          __ cmpptr(mdo_addr, 0);
          __ jcc(Assembler::equal, ok);
          __ cmpptr(mdo_addr, TypeEntries::null_seen);
          __ jcc(Assembler::equal, ok);
          // may have been set by another thread
          __ mov_metadata(tmp, exact_klass->constant_encoding());
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
          __ pop(tmp);
        }
#else
        __ jccb(Assembler::zero, next);
#endif
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        __ movptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        __ orptr(mdo_addr, TypeEntries::type_unknown);
      }
    }

    __ bind(next);
  }
}

void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}


void LIR_Assembler::align_backward_branch_target() {
  __ align(BytesPerWord);
}

void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  if (left->is_single_cpu()) {
    __ negl(left->as_register());
    move_regs(left->as_register(), dest->as_register());

  } else if (left->is_double_cpu()) {
    Register lo = left->as_register_lo();
#ifdef _LP64
    Register dst = dest->as_register_lo();
    __ movptr(dst, lo);
    __ negptr(dst);
#else
    Register hi = left->as_register_hi();
    __ lneg(hi, lo);
    if (dest->as_register_lo() == hi) {
      assert(dest->as_register_hi() != lo, "destroying register");
      move_regs(hi, dest->as_register_hi());
      move_regs(lo, dest->as_register_lo());
    } else {
      move_regs(lo, dest->as_register_lo());
      move_regs(hi, dest->as_register_hi());
    }
#endif // _LP64

  } else if (dest->is_single_xmm()) {
#ifdef _LP64
    if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
      assert(tmp->is_valid(), "need temporary");
      assert_different_registers(left->as_xmm_float_reg(), tmp->as_xmm_float_reg());
      __ vpxor(dest->as_xmm_float_reg(), tmp->as_xmm_float_reg(), left->as_xmm_float_reg(), 2);
    }
    else
#endif
    {
      assert(!tmp->is_valid(), "do not need temporary");
      if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
        __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
      }
      __ xorps(dest->as_xmm_float_reg(),
               ExternalAddress((address)float_signflip_pool));
    }
  } else if (dest->is_double_xmm()) {
#ifdef _LP64
    if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
      assert(tmp->is_valid(), "need temporary");
      assert_different_registers(left->as_xmm_double_reg(), tmp->as_xmm_double_reg());
      __ vpxor(dest->as_xmm_double_reg(), tmp->as_xmm_double_reg(), left->as_xmm_double_reg(), 2);
    }
    else
#endif
    {
      assert(!tmp->is_valid(), "do not need temporary");
      if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
        __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
      }
      __ xorpd(dest->as_xmm_double_reg(),
               ExternalAddress((address)double_signflip_pool));
    }
#ifndef _LP64
  } else if (left->is_single_fpu() || left->is_double_fpu()) {
    assert(left->fpu() == 0, "arg must be on TOS");
    assert(dest->fpu() == 0, "dest must be TOS");
    __ fchs();
#endif // !_LP64

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_address(), "must be an address");
  assert(dest->is_register(), "must be a register");

  PatchingStub* patch = NULL;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }

  Register reg = dest->as_pointer_register();
  LIR_Address* addr = src->as_address_ptr();
  __ lea(reg, as_Address(addr));

  if (patch != NULL) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }
}


void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  __ call(RuntimeAddress(dest));
  if (info != NULL) {
    add_call_info_here(info);
  }
}

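// Volatile long accesses must be performed as a single 64-bit memory
// operation. On 32-bit x86 that is only possible through an XMM (or x87)
// register, which is why the value is routed through FP registers here.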
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  assert(type == T_LONG, "only for volatile long fields");

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  if (src->is_double_xmm()) {
    if (dest->is_double_cpu()) {
#ifdef _LP64
      __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
#else
      __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
      __ psrlq(src->as_xmm_double_reg(), 32);
      __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
#endif // _LP64
    } else if (dest->is_double_stack()) {
      __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
    } else if (dest->is_address()) {
      __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_xmm()) {
    if (src->is_double_stack()) {
      __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

#ifndef _LP64
  } else if (src->is_double_fpu()) {
    assert(src->fpu_regnrLo() == 0, "must be TOS");
    if (dest->is_double_stack()) {
      __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix()));
    } else if (dest->is_address()) {
      __ fistp_d(as_Address(dest->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_fpu()) {
    assert(dest->fpu_regnrLo() == 0, "must be TOS");
    if (src->is_double_stack()) {
      __ fild_d(frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ fild_d(as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }
#endif // !_LP64

  } else {
    ShouldNotReachHere();
  }
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    Assembler::Condition acond = Assembler::zero;
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::equal;        break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;     break;
      case lir_cond_less:         acond = Assembler::less;         break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;    break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
      case lir_cond_greater:      acond = Assembler::greater;      break;
      case lir_cond_belowEqual:   acond = Assembler::belowEqual;   break;
      case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   break;
      default:                    ShouldNotReachHere();
    }
    __ jcc(acond, ok);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

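// x86 follows a TSO-like memory model: loads are not reordered with other
// loads and stores are not reordered with other stores, so only StoreLoad
// ordering needs an explicit fence. The acquire/release and the other
// fine-grained barriers below are therefore no-ops in generated code.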
void LIR_Assembler::membar() {
  // QQQ sparc TSO uses this,
  __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::membar_acquire() {
  // No x86 machines currently require load fences
}

void LIR_Assembler::membar_release() {
  // No x86 machines currently require store fences
}

void LIR_Assembler::membar_loadload() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}

void LIR_Assembler::membar_storestore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}

void LIR_Assembler::membar_loadstore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::on_spin_wait() {
  __ pause ();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
#ifdef _LP64
  // __ get_thread(result_reg->as_register_lo());
  __ mov(result_reg->as_register(), r15_thread);
#else
  __ get_thread(result_reg->as_register());
#endif // _LP64
}


void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}

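// Atomic exchange / fetch-and-add. xadd needs an explicit LOCK prefix, while
// xchg with a memory operand is implicitly locked on x86, so no lock() call
// precedes the xchg forms below.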
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  assert(data == dest, "xchg/xadd uses only 2 operands");

  if (data->type() == T_INT) {
    if (code == lir_xadd) {
      __ lock();
      __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
    } else {
      __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
    }
  } else if (data->is_oop()) {
    assert (code == lir_xchg, "xadd for oops");
    Register obj = data->as_register();
#ifdef _LP64
    if (UseCompressedOops) {
      __ encode_heap_oop(obj);
      __ xchgl(obj, as_Address(src->as_address_ptr()));
      __ decode_heap_oop(obj);
    } else {
      __ xchgptr(obj, as_Address(src->as_address_ptr()));
    }
#else
    __ xchgl(obj, as_Address(src->as_address_ptr()));
#endif
  } else if (data->type() == T_LONG) {
#ifdef _LP64
    assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
    if (code == lir_xadd) {
      __ lock();
      __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
    } else {
      __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
    }
#else
    ShouldNotReachHere();
#endif
  } else {
    ShouldNotReachHere();
  }
}

#undef __