// Source file: src/hotspot/cpu/x86/frame_x86.cpp (OpenJDK HotSpot, x86 port).
/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markWord.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/signature.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/formatBuffer.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#include "runtime/vframeArray.hpp"
#endif
#ifdef ASSERT
// Debug-only hook; no per-location validation is needed on x86.
void RegisterMap::check_location_valid() {
}
#endif
// Profiling/safepoint support
55
56
bool frame::safe_for_sender(JavaThread *thread) {
57
address sp = (address)_sp;
58
address fp = (address)_fp;
59
address unextended_sp = (address)_unextended_sp;
60
61
// consider stack guards when trying to determine "safe" stack pointers
62
// sp must be within the usable part of the stack (not in guards)
63
if (!thread->is_in_usable_stack(sp)) {
64
return false;
65
}
66
67
// unextended sp must be within the stack and above or equal sp
68
if (!thread->is_in_stack_range_incl(unextended_sp, sp)) {
69
return false;
70
}
71
72
// an fp must be within the stack and above (but not equal) sp
73
// second evaluation on fp+ is added to handle situation where fp is -1
74
bool fp_safe = thread->is_in_stack_range_excl(fp, sp) &&
75
thread->is_in_full_stack_checked(fp + (return_addr_offset * sizeof(void*)));
76
77
// We know sp/unextended_sp are safe only fp is questionable here
78
79
// If the current frame is known to the code cache then we can attempt to
80
// construct the sender and do some validation of it. This goes a long way
81
// toward eliminating issues when we get in frame construction code
82
83
if (_cb != NULL ) {
84
85
// First check if frame is complete and tester is reliable
86
// Unfortunately we can only check frame complete for runtime stubs and nmethod
87
// other generic buffer blobs are more problematic so we just assume they are
88
// ok. adapter blobs never have a frame complete and are never ok.
89
90
if (!_cb->is_frame_complete_at(_pc)) {
91
if (_cb->is_compiled() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
92
return false;
93
}
94
}
95
96
// Could just be some random pointer within the codeBlob
97
if (!_cb->code_contains(_pc)) {
98
return false;
99
}
100
101
// Entry frame checks
102
if (is_entry_frame()) {
103
// an entry frame must have a valid fp.
104
return fp_safe && is_entry_frame_valid(thread);
105
} else if (is_optimized_entry_frame()) {
106
return fp_safe;
107
}
108
109
intptr_t* sender_sp = NULL;
110
intptr_t* sender_unextended_sp = NULL;
111
address sender_pc = NULL;
112
intptr_t* saved_fp = NULL;
113
114
if (is_interpreted_frame()) {
115
// fp must be safe
116
if (!fp_safe) {
117
return false;
118
}
119
120
sender_pc = (address) this->fp()[return_addr_offset];
121
// for interpreted frames, the value below is the sender "raw" sp,
122
// which can be different from the sender unextended sp (the sp seen
123
// by the sender) because of current frame local variables
124
sender_sp = (intptr_t*) addr_at(sender_sp_offset);
125
sender_unextended_sp = (intptr_t*) this->fp()[interpreter_frame_sender_sp_offset];
126
saved_fp = (intptr_t*) this->fp()[link_offset];
127
128
} else {
129
// must be some sort of compiled/runtime frame
130
// fp does not have to be safe (although it could be check for c1?)
131
132
// check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc
133
if (_cb->frame_size() <= 0) {
134
return false;
135
}
136
137
sender_sp = _unextended_sp + _cb->frame_size();
138
// Is sender_sp safe?
139
if (!thread->is_in_full_stack_checked((address)sender_sp)) {
140
return false;
141
}
142
sender_unextended_sp = sender_sp;
143
// On Intel the return_address is always the word on the stack
144
sender_pc = (address) *(sender_sp-1);
145
// Note: frame::sender_sp_offset is only valid for compiled frame
146
saved_fp = (intptr_t*) *(sender_sp - frame::sender_sp_offset);
147
}
148
149
150
// If the potential sender is the interpreter then we can do some more checking
151
if (Interpreter::contains(sender_pc)) {
152
153
// ebp is always saved in a recognizable place in any code we generate. However
154
// only if the sender is interpreted/call_stub (c1 too?) are we certain that the saved ebp
155
// is really a frame pointer.
156
157
if (!thread->is_in_stack_range_excl((address)saved_fp, (address)sender_sp)) {
158
return false;
159
}
160
161
// construct the potential sender
162
163
frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);
164
165
return sender.is_interpreted_frame_valid(thread);
166
167
}
168
169
// We must always be able to find a recognizable pc
170
CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
171
if (sender_pc == NULL || sender_blob == NULL) {
172
return false;
173
}
174
175
// Could be a zombie method
176
if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
177
return false;
178
}
179
180
// Could just be some random pointer within the codeBlob
181
if (!sender_blob->code_contains(sender_pc)) {
182
return false;
183
}
184
185
// We should never be able to see an adapter if the current frame is something from code cache
186
if (sender_blob->is_adapter_blob()) {
187
return false;
188
}
189
190
// Could be the call_stub
191
if (StubRoutines::returns_to_call_stub(sender_pc)) {
192
if (!thread->is_in_stack_range_excl((address)saved_fp, (address)sender_sp)) {
193
return false;
194
}
195
196
// construct the potential sender
197
198
frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);
199
200
// Validate the JavaCallWrapper an entry frame must have
201
address jcw = (address)sender.entry_frame_call_wrapper();
202
203
return thread->is_in_stack_range_excl(jcw, (address)sender.fp());
204
} else if (sender_blob->is_optimized_entry_blob()) {
205
return false;
206
}
207
208
CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
209
if (nm != NULL) {
210
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
211
nm->method()->is_method_handle_intrinsic()) {
212
return false;
213
}
214
}
215
216
// If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size
217
// because the return address counts against the callee's frame.
218
219
if (sender_blob->frame_size() <= 0) {
220
assert(!sender_blob->is_compiled(), "should count return address at least");
221
return false;
222
}
223
224
// We should never be able to see anything here except an nmethod. If something in the
225
// code cache (current frame) is called by an entity within the code cache that entity
226
// should not be anything but the call stub (already covered), the interpreter (already covered)
227
// or an nmethod.
228
229
if (!sender_blob->is_compiled()) {
230
return false;
231
}
232
233
// Could put some more validation for the potential non-interpreted sender
234
// frame we'd create by calling sender if I could think of any. Wait for next crash in forte...
235
236
// One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb
237
238
// We've validated the potential sender that would be created
239
return true;
240
}
241
242
// Must be native-compiled frame. Since sender will try and use fp to find
243
// linkages it must be safe
244
245
if (!fp_safe) {
246
return false;
247
}
248
249
// Will the pc we fetch be non-zero (which we'll find at the oldest frame)
250
251
if ( (address) this->fp()[return_addr_offset] == NULL) return false;
252
253
254
// could try and do some more potential verification of native frame if we could think of some...
255
256
return true;
257
258
}
259
260
261
// Overwrite this frame's saved return address on the stack with `pc`
// (used e.g. when installing a deopt handler) and update the cached
// _pc/_deopt_state accordingly.
void frame::patch_pc(Thread* thread, address pc) {
  assert(_cb == CodeCache::find_blob(pc), "unexpected pc");
  // On x86 the return address sits in the word just below sp().
  address* pc_addr = &(((address*) sp())[-1]);
  if (TracePcPatching) {
    tty->print_cr("patch_pc at address " INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "]",
                  p2i(pc_addr), p2i(*pc_addr), p2i(pc));
  }
  // Either the return address is the original one or we are going to
  // patch in the same address that's already there.
  assert(_pc == *pc_addr || pc == *pc_addr, "must be");
  *pc_addr = pc;
  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    assert(original_pc == _pc, "expected original PC to be stored before patching");
    _deopt_state = is_deoptimized;
    // leave _pc as is
  } else {
    _deopt_state = not_deoptimized;
    _pc = pc;
  }
}
bool frame::is_interpreted_frame() const {
284
return Interpreter::contains(pc());
285
}
286
287
// Frame size in words: distance between this frame's sp and its sender's sp.
int frame::frame_size(RegisterMap* map) const {
  frame sender = this->sender(map);
  return sender.sp() - sp();
}
// Address of the entry (call stub) frame argument at expression-stack
// `offset`; `offset` is converted to a word index first.
intptr_t* frame::entry_frame_argument_at(int offset) const {
  // convert offset to index to deal with tsi
  int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
  // Entry frame's arguments are always in relation to unextended_sp()
  return &unextended_sp()[index];
}
// sender_sp
300
301
intptr_t* frame::interpreter_frame_sender_sp() const {
302
assert(is_interpreted_frame(), "interpreted frame expected");
303
return (intptr_t*) at(interpreter_frame_sender_sp_offset);
304
}
305
306
// Store `sender_sp` into this interpreter frame's sender-sp slot.
void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  ptr_at_put(interpreter_frame_sender_sp_offset, (intptr_t) sender_sp);
}
// monitor elements
313
314
BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
315
return (BasicObjectLock*) addr_at(interpreter_frame_monitor_block_bottom_offset);
316
}
317
318
// Top (lowest address) of this interpreter frame's monitor block,
// read from the slot that the interpreter maintains in the frame.
BasicObjectLock* frame::interpreter_frame_monitor_end() const {
  BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset);
  // make sure the pointer points inside the frame
  assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer");
  assert((intptr_t*) result < fp(), "monitor end should be strictly below the frame pointer");
  return result;
}
// Update the monitor-block-top slot of this interpreter frame.
void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) {
  *((BasicObjectLock**)addr_at(interpreter_frame_monitor_block_top_offset)) = value;
}
// Used by template based interpreter deoptimization
331
void frame::interpreter_frame_set_last_sp(intptr_t* sp) {
332
*((intptr_t**)addr_at(interpreter_frame_last_sp_offset)) = sp;
333
}
334
335
// Sender of an entry (call stub) frame: reconstructed from the
// JavaFrameAnchor saved in the frame's JavaCallWrapper.
frame frame::sender_for_entry_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender
  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
  assert(!entry_frame_is_first(), "next Java fp must be non zero");
  assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
  // Since we are walking the stack now this nested anchor is obviously walkable
  // even if it wasn't when it was stacked.
  if (!jfa->walkable()) {
    // Capture _last_Java_pc (if needed) and mark anchor walkable.
    jfa->capture_last_Java_pc();
  }
  map->clear();
  assert(map->include_argument_oops(), "should be set by clear");
  vmassert(jfa->last_Java_pc() != NULL, "not walkable");
  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());

  return fr;
}
// Locate the JavaFrameAnchor embedded in an optimized-entry frame.
JavaFrameAnchor* OptimizedEntryBlob::jfa_for_frame(const frame& frame) const {
  // need unextended_sp here, since normal sp is wrong for interpreter callees
  return reinterpret_cast<JavaFrameAnchor*>(reinterpret_cast<char*>(frame.unextended_sp()) + in_bytes(jfa_sp_offset()));
}
// Sender of an optimized entry (native-call intrinsic) frame,
// reconstructed from the anchor stored in the OptimizedEntryBlob frame.
frame frame::sender_for_optimized_entry_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");
  OptimizedEntryBlob* blob = _cb->as_optimized_entry_blob();
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender
  JavaFrameAnchor* jfa = blob->jfa_for_frame(*this);
  assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
  // Since we are walking the stack now this nested anchor is obviously walkable
  // even if it wasn't when it was stacked.
  if (!jfa->walkable()) {
    // Capture _last_Java_pc (if needed) and mark anchor walkable.
    jfa->capture_last_Java_pc();
  }
  map->clear();
  assert(map->include_argument_oops(), "should be set by clear");
  vmassert(jfa->last_Java_pc() != NULL, "not walkable");
  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());

  return fr;
}
//------------------------------------------------------------------------------
// frame::verify_deopt_original_pc
//
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP.
#ifdef ASSERT
void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) {
  frame fr;

  // This is ugly but it's better than to change {get,set}_original_pc
  // to take an SP value as argument. And it's only a debugging
  // method anyway.
  fr._unextended_sp = unextended_sp;

  address original_pc = nm->get_original_pc(&fr);
  assert(nm->insts_contains_inclusive(original_pc),
         "original PC must be in the main code section of the the compiled method (or must be immediately following it)");
}
#endif
//------------------------------------------------------------------------------
// frame::adjust_unextended_sp
#ifdef ASSERT
void frame::adjust_unextended_sp() {
  // On x86, sites calling method handle intrinsics and lambda forms are treated
  // as any other call site. Therefore, no special action is needed when we are
  // returning to any of these call sites.

  if (_cb != NULL) {
    CompiledMethod* sender_cm = _cb->as_compiled_method_or_null();
    if (sender_cm != NULL) {
      // If the sender PC is a deoptimization point, get the original PC.
      if (sender_cm->is_deopt_entry(_pc) ||
          sender_cm->is_deopt_mh_entry(_pc)) {
        verify_deopt_original_pc(sender_cm, _unextended_sp);
      }
    }
  }
}
#endif
//------------------------------------------------------------------------------
424
// frame::update_map_with_saved_link
425
void frame::update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr) {
426
// The interpreter and compiler(s) always save EBP/RBP in a known
427
// location on entry. We must record where that location is
428
// so this if EBP/RBP was live on callout from c2 we can find
429
// the saved copy no matter what it called.
430
431
// Since the interpreter always saves EBP/RBP if we record where it is then
432
// we don't have to always save EBP/RBP on entry and exit to c2 compiled
433
// code, on entry will be enough.
434
map->set_location(rbp->as_VMReg(), (address) link_addr);
435
#ifdef AMD64
436
// this is weird "H" ought to be at a higher address however the
437
// oopMaps seems to have the "H" regs at the same address and the
438
// vanilla register.
439
// XXXX make this go away
440
if (true) {
441
map->set_location(rbp->as_VMReg()->next(), (address) link_addr);
442
}
443
#endif // AMD64
444
}
445
446
447
//------------------------------------------------------------------------------
448
// frame::sender_for_interpreter_frame
449
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
450
// SP is the raw SP from the sender after adapter or interpreter
451
// extension.
452
intptr_t* sender_sp = this->sender_sp();
453
454
// This is the sp before any possible extension (adapter/locals).
455
intptr_t* unextended_sp = interpreter_frame_sender_sp();
456
457
#if COMPILER2_OR_JVMCI
458
if (map->update_map()) {
459
update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset));
460
}
461
#endif // COMPILER2_OR_JVMCI
462
463
return frame(sender_sp, unextended_sp, link(), sender_pc());
464
}
465
466
467
//------------------------------------------------------------------------------
468
// frame::sender_for_compiled_frame
469
frame frame::sender_for_compiled_frame(RegisterMap* map) const {
470
assert(map != NULL, "map must be set");
471
472
// frame owned by optimizing compiler
473
assert(_cb->frame_size() >= 0, "must have non-zero frame size");
474
intptr_t* sender_sp = unextended_sp() + _cb->frame_size();
475
intptr_t* unextended_sp = sender_sp;
476
477
// On Intel the return_address is always the word on the stack
478
address sender_pc = (address) *(sender_sp-1);
479
480
// This is the saved value of EBP which may or may not really be an FP.
481
// It is only an FP if the sender is an interpreter frame (or C1?).
482
intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset);
483
484
if (map->update_map()) {
485
// Tell GC to use argument oopmaps for some runtime stubs that need it.
486
// For C1, the runtime stub might not have oop maps, so set this flag
487
// outside of update_register_map.
488
map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
489
if (_cb->oop_maps() != NULL) {
490
OopMapSet::update_register_map(this, map);
491
}
492
493
// Since the prolog does the save and restore of EBP there is no oopmap
494
// for it so we must fill in its location as if there was an oopmap entry
495
// since if our caller was compiled code there could be live jvm state in it.
496
update_map_with_saved_link(map, saved_fp_addr);
497
}
498
499
assert(sender_sp != sp(), "must have changed");
500
return frame(sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
501
}
502
503
504
//------------------------------------------------------------------------------
505
// frame::sender_raw
506
frame frame::sender_raw(RegisterMap* map) const {
507
// Default is we done have to follow them. The sender_for_xxx will
508
// update it accordingly
509
map->set_include_argument_oops(false);
510
511
if (is_entry_frame()) return sender_for_entry_frame(map);
512
if (is_optimized_entry_frame()) return sender_for_optimized_entry_frame(map);
513
if (is_interpreted_frame()) return sender_for_interpreter_frame(map);
514
assert(_cb == CodeCache::find_blob(pc()),"Must be the same");
515
516
if (_cb != NULL) {
517
return sender_for_compiled_frame(map);
518
}
519
// Must be native-compiled frame, i.e. the marshaling code for native
520
// methods that exists in the core system.
521
return frame(sender_sp(), link(), sender_pc());
522
}
523
524
// Public sender(): sender_raw() plus stack-watermark notification
// when the map is configured to process frames.
frame frame::sender(RegisterMap* map) const {
  frame result = sender_raw(map);

  if (map->process_frames()) {
    StackWatermarkSet::on_iteration(map->thread(), result);
  }

  return result;
}
// Sanity-check a potential interpreter frame: alignment of sp/fp,
// plausible frame extent, and validity of the Method*, bcp,
// ConstantPoolCache* and locals it would contain.
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // These are reasonable sanity checks
  if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (fp() + interpreter_frame_initial_sp_offset < sp()) {
    return false;
  }
  // These are hacks to keep us out of trouble.
  // The problem with these is that they mask other problems
  if (fp() <= sp()) {        // this attempts to deal with unsigned comparison above
    return false;
  }

  // do some validation of frame elements
  // first the method

  Method* m = *interpreter_frame_method_addr();

  // validate the method we'd find in this potential sender
  if (!Method::is_valid_method(m)) return false;

  // stack frames shouldn't be much larger than max_stack elements
  // this test requires the use the unextended_sp which is the sp as seen by
  // the current frame, and not sp which is the "raw" pc which could point
  // further because of local variables of the callee method inserted after
  // method arguments
  if (fp() - unextended_sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
    return false;
  }

  // validate bci/bcp

  address bcp = interpreter_frame_bcp();
  if (m->validate_bci_from_bcp(bcp) < 0) {
    return false;
  }

  // validate ConstantPoolCache*
  ConstantPoolCache* cp = *interpreter_frame_cache_addr();
  if (MetaspaceObj::is_valid(cp) == false) return false;

  // validate locals

  address locals = (address) *interpreter_frame_locals_addr();
  return thread->is_in_stack_range_incl(locals, (address)fp());
}
// Fetch the method's return value from this interpreter frame (after the
// callee has pushed it) into *oop_result or *value_result according to the
// method's result type, which is also returned.
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

  intptr_t* tos_addr;
  if (method->is_native()) {
    // Prior to calling into the runtime to report the method_exit the possible
    // return value is pushed to the native stack. If the result is a jfloat/jdouble
    // then ST0 is saved before EAX/EDX. See the note in generate_native_result
    tos_addr = (intptr_t*)sp();
    if (type == T_FLOAT || type == T_DOUBLE) {
      // QQQ seems like this code is equivalent on the two platforms
#ifdef AMD64
      // This is times two because we do a push(ltos) after pushing XMM0
      // and that takes two interpreter stack slots.
      tos_addr += 2 * Interpreter::stackElementWords;
#else
      tos_addr += 2;
#endif // AMD64
    }
  } else {
    tos_addr = (intptr_t*)interpreter_frame_tos_address();
  }

  switch (type) {
    case T_OBJECT  :
    case T_ARRAY   : {
      oop obj;
      if (method->is_native()) {
        obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
      } else {
        oop* obj_p = (oop*)tos_addr;
        obj = (obj_p == NULL) ? (oop)NULL : *obj_p;
      }
      assert(Universe::is_in_heap_or_null(obj), "sanity check");
      *oop_result = obj;
      break;
    }
    case T_BOOLEAN : value_result->z = *(jboolean*)tos_addr; break;
    case T_BYTE    : value_result->b = *(jbyte*)tos_addr; break;
    case T_CHAR    : value_result->c = *(jchar*)tos_addr; break;
    case T_SHORT   : value_result->s = *(jshort*)tos_addr; break;
    case T_INT     : value_result->i = *(jint*)tos_addr; break;
    case T_LONG    : value_result->j = *(jlong*)tos_addr; break;
    case T_FLOAT   : {
#ifdef AMD64
        value_result->f = *(jfloat*)tos_addr;
#else
      if (method->is_native()) {
        jdouble d = *(jdouble*)tos_addr;  // Result was in ST0 so need to convert to jfloat
        value_result->f = (jfloat)d;
      } else {
        value_result->f = *(jfloat*)tos_addr;
      }
#endif // AMD64
      break;
    }
    case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr; break;
    case T_VOID    : /* Nothing to do */ break;
    default        : ShouldNotReachHere();
  }

  return type;
}
// Address of the expression-stack slot `offset` entries below TOS.
intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
  int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
  return &interpreter_frame_tos_address()[index];
}
#ifndef PRODUCT
659
660
#define DESCRIBE_FP_OFFSET(name) \
661
values.describe(frame_no, fp() + frame::name##_offset, #name)
662
663
void frame::describe_pd(FrameValues& values, int frame_no) {
664
if (is_interpreted_frame()) {
665
DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
666
DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
667
DESCRIBE_FP_OFFSET(interpreter_frame_method);
668
DESCRIBE_FP_OFFSET(interpreter_frame_mirror);
669
DESCRIBE_FP_OFFSET(interpreter_frame_mdp);
670
DESCRIBE_FP_OFFSET(interpreter_frame_cache);
671
DESCRIBE_FP_OFFSET(interpreter_frame_locals);
672
DESCRIBE_FP_OFFSET(interpreter_frame_bcp);
673
DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
674
#ifdef AMD64
675
} else if (is_entry_frame()) {
676
// This could be more descriptive if we use the enum in
677
// stubGenerator to map to real names but it's most important to
678
// claim these frame slots so the error checking works.
679
for (int i = 0; i < entry_frame_after_call_words; i++) {
680
values.describe(frame_no, fp() - i, err_msg("call_stub word fp - %d", i));
681
}
682
#endif // AMD64
683
}
684
}
685
#endif // !PRODUCT
686
687
intptr_t *frame::initial_deoptimization_info() {
  // used to reset the saved FP
  return fp();
}
// "Real" frame pointer: computed from the code blob's frame size when
// available, falling back to the saved fp register otherwise.
intptr_t* frame::real_fp() const {
  if (_cb != NULL) {
    // use the frame size if valid
    int size = _cb->frame_size();
    if (size > 0) {
      return unextended_sp() + size;
    }
  }
  // else rely on fp()
  assert(! is_compiled_frame(), "unknown compiled frame size");
  return fp();
}
#ifndef PRODUCT
706
// This is a generic constructor which is only used by pns() in debug.cpp.
707
frame::frame(void* sp, void* fp, void* pc) {
708
init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
709
}
710
711
void frame::pd_ps() {}
712
#endif
713
714
// Make the anchor walkable by capturing the last Java pc from the stack
// (no-op if no last frame is set or the anchor is already walkable).
void JavaFrameAnchor::make_walkable(JavaThread* thread) {
  // last frame set?
  if (last_Java_sp() == NULL) return;
  // already walkable?
  if (walkable()) return;
  vmassert(Thread::current() == (Thread*)thread, "not current thread");
  vmassert(last_Java_sp() != NULL, "not called from Java code?");
  vmassert(last_Java_pc() == NULL, "already walkable");
  capture_last_Java_pc();
  vmassert(walkable(), "something went wrong");
}
void JavaFrameAnchor::capture_last_Java_pc() {
727
vmassert(_last_Java_sp != NULL, "no last frame set");
728
vmassert(_last_Java_pc == NULL, "already walkable");
729
_last_Java_pc = (address)_last_Java_sp[-1];
730
}
731
732