GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/prims/jvmtiEnvBase.cpp
1
/*
2
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#include "precompiled.hpp"
26
#include "classfile/classLoaderDataGraph.hpp"
27
#include "classfile/javaClasses.hpp"
28
#include "classfile/moduleEntry.hpp"
29
#include "jvmtifiles/jvmtiEnv.hpp"
30
#include "memory/iterator.hpp"
31
#include "memory/resourceArea.hpp"
32
#include "oops/klass.inline.hpp"
33
#include "oops/objArrayKlass.hpp"
34
#include "oops/objArrayOop.hpp"
35
#include "oops/oop.inline.hpp"
36
#include "oops/oopHandle.inline.hpp"
37
#include "prims/jvmtiEnvBase.hpp"
38
#include "prims/jvmtiEventController.inline.hpp"
39
#include "prims/jvmtiExtensions.hpp"
40
#include "prims/jvmtiImpl.hpp"
41
#include "prims/jvmtiManageCapabilities.hpp"
42
#include "prims/jvmtiTagMap.hpp"
43
#include "prims/jvmtiThreadState.inline.hpp"
44
#include "runtime/biasedLocking.hpp"
45
#include "runtime/deoptimization.hpp"
46
#include "runtime/frame.inline.hpp"
47
#include "runtime/handles.inline.hpp"
48
#include "runtime/interfaceSupport.inline.hpp"
49
#include "runtime/jfieldIDWorkaround.hpp"
50
#include "runtime/jniHandles.inline.hpp"
51
#include "runtime/objectMonitor.inline.hpp"
52
#include "runtime/osThread.hpp"
53
#include "runtime/signature.hpp"
54
#include "runtime/thread.inline.hpp"
55
#include "runtime/threadSMR.hpp"
56
#include "runtime/vframe.inline.hpp"
57
#include "runtime/vframe_hp.hpp"
58
#include "runtime/vmThread.hpp"
59
#include "runtime/vmOperations.hpp"
60
61
62
///////////////////////////////////////////////////////////////
63
//
64
// JvmtiEnvBase
65
//
66
67
JvmtiEnvBase* JvmtiEnvBase::_head_environment = NULL;
68
69
bool JvmtiEnvBase::_globally_initialized = false;
70
volatile bool JvmtiEnvBase::_needs_clean_up = false;
71
72
jvmtiPhase JvmtiEnvBase::_phase = JVMTI_PHASE_PRIMORDIAL;
73
74
volatile int JvmtiEnvBase::_dying_thread_env_iteration_count = 0;
75
76
extern jvmtiInterface_1_ jvmti_Interface;
77
extern jvmtiInterface_1_ jvmtiTrace_Interface;
78
79
80
// perform initializations that must occur before any JVMTI environments
81
// are released but which should only be performed once (no matter
82
// how many environments are created).
83
void
84
JvmtiEnvBase::globally_initialize() {
85
assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
86
assert(_globally_initialized == false, "bad call");
87
88
JvmtiManageCapabilities::initialize();
89
90
// register extension functions and events
91
JvmtiExtensions::register_extensions();
92
93
#ifdef JVMTI_TRACE
94
JvmtiTrace::initialize();
95
#endif
96
97
_globally_initialized = true;
98
}
99
100
101
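// Per-environment initialization: append this environment to the end of the
// global environment list and run the one-time global initialization if needed.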
void
102
JvmtiEnvBase::initialize() {
103
assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
104
105
// Add this environment to the end of the environment list (order is important)
106
{
107
// This block of code must not contain any safepoints, as list deallocation
108
// (which occurs at a safepoint) cannot occur simultaneously with this list
109
// addition. Note: NoSafepointVerifier cannot, currently, be used before
110
// threads exist.
111
JvmtiEnvIterator it;
112
JvmtiEnvBase *previous_env = NULL;
113
for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
114
previous_env = env;
115
}
116
if (previous_env == NULL) {
117
_head_environment = this;
118
} else {
119
previous_env->set_next_environment(this);
120
}
121
}
122
123
if (_globally_initialized == false) {
124
globally_initialize();
125
}
126
}
127
128
jvmtiPhase
129
JvmtiEnvBase::phase() {
130
// For JVMTI environments that possess the can_generate_early_vmstart capability:
131
// replace JVMTI_PHASE_PRIMORDIAL with JVMTI_PHASE_START
132
if (_phase == JVMTI_PHASE_PRIMORDIAL &&
133
JvmtiExport::early_vmstart_recorded() &&
134
early_vmstart_env()) {
135
return JVMTI_PHASE_START;
136
}
137
return _phase; // Normal case
138
}
139
140
bool
141
JvmtiEnvBase::is_valid() {
142
jint value = 0;
143
144
// This object might not be a JvmtiEnvBase so we can't assume
145
// the _magic field is properly aligned. Get the value in a safe
146
// way and then check against JVMTI_MAGIC.
147
148
switch (sizeof(_magic)) {
149
case 2:
150
value = Bytes::get_native_u2((address)&_magic);
151
break;
152
153
case 4:
154
value = Bytes::get_native_u4((address)&_magic);
155
break;
156
157
case 8:
158
value = Bytes::get_native_u8((address)&_magic);
159
break;
160
161
default:
162
guarantee(false, "_magic field is an unexpected size");
163
}
164
165
return value == JVMTI_MAGIC;
166
}
167
168
169
bool
170
JvmtiEnvBase::use_version_1_0_semantics() {
171
int major, minor, micro;
172
173
JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
174
return major == 1 && minor == 0; // micro version doesn't matter here
175
}
176
177
178
bool
179
JvmtiEnvBase::use_version_1_1_semantics() {
180
int major, minor, micro;
181
182
JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
183
return major == 1 && minor == 1; // micro version doesn't matter here
184
}
185
186
bool
187
JvmtiEnvBase::use_version_1_2_semantics() {
188
int major, minor, micro;
189
190
JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
191
return major == 1 && minor == 2; // micro version doesn't matter here
192
}
193
194
195
JvmtiEnvBase::JvmtiEnvBase(jint version) : _env_event_enable() {
196
_version = version;
197
_env_local_storage = NULL;
198
_tag_map = NULL;
199
_native_method_prefix_count = 0;
200
_native_method_prefixes = NULL;
201
_next = NULL;
202
_class_file_load_hook_ever_enabled = false;
203
204
// Moot since ClassFileLoadHook not yet enabled.
205
// But "true" will give a more predictable ClassFileLoadHook behavior
206
// for environment creation during ClassFileLoadHook.
207
_is_retransformable = true;
208
209
// all callbacks initially NULL
210
memset(&_event_callbacks, 0, sizeof(jvmtiEventCallbacks));
211
212
// all capabilities initially off
213
memset(&_current_capabilities, 0, sizeof(_current_capabilities));
214
215
// all prohibited capabilities initially off
216
memset(&_prohibited_capabilities, 0, sizeof(_prohibited_capabilities));
217
218
_magic = JVMTI_MAGIC;
219
220
JvmtiEventController::env_initialize((JvmtiEnv*)this);
221
222
#ifdef JVMTI_TRACE
223
_jvmti_external.functions = TraceJVMTI != NULL ? &jvmtiTrace_Interface : &jvmti_Interface;
224
#else
225
_jvmti_external.functions = &jvmti_Interface;
226
#endif
227
}
228
229
230
void
231
JvmtiEnvBase::dispose() {
232
233
#ifdef JVMTI_TRACE
234
JvmtiTrace::shutdown();
235
#endif
236
237
// Dispose of event info and let the event controller call us back
238
// in a locked state (env_dispose, below)
239
JvmtiEventController::env_dispose(this);
240
}
241
242
void
243
JvmtiEnvBase::env_dispose() {
244
assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
245
246
// We have been entered with all events disabled on this environment.
247
// A race to re-enable events (by setting callbacks) is prevented by
248
// checking for a valid environment when setting callbacks (while
249
// holding the JvmtiThreadState_lock).
250
251
// Mark as invalid.
252
_magic = DISPOSED_MAGIC;
253
254
// Relinquish all capabilities.
255
jvmtiCapabilities *caps = get_capabilities();
256
JvmtiManageCapabilities::relinquish_capabilities(caps, caps, caps);
257
258
// Same situation as with events (see above)
259
set_native_method_prefixes(0, NULL);
260
261
JvmtiTagMap* tag_map_to_clear = tag_map_acquire();
262
// A tag map can be big, clear it now to save memory until
263
// the destructor runs.
264
if (tag_map_to_clear != NULL) {
265
tag_map_to_clear->clear();
266
}
267
268
_needs_clean_up = true;
269
}
270
271
272
JvmtiEnvBase::~JvmtiEnvBase() {
273
assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
274
275
// There is a small window of time during which the tag map of a
276
// disposed environment could have been reallocated.
277
// Make sure it is gone.
278
JvmtiTagMap* tag_map_to_deallocate = _tag_map;
279
set_tag_map(NULL);
280
// A tag map can be big, deallocate it now
281
if (tag_map_to_deallocate != NULL) {
282
delete tag_map_to_deallocate;
283
}
284
285
_magic = BAD_MAGIC;
286
}
287
288
289
void
290
JvmtiEnvBase::periodic_clean_up() {
291
assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
292
293
// JvmtiEnvBase reference is saved in JvmtiEnvThreadState. So
294
// clean up JvmtiThreadState before deleting JvmtiEnv pointer.
295
JvmtiThreadState::periodic_clean_up();
296
297
// Unlink all invalid environments from the list of environments
298
// and deallocate them
299
JvmtiEnvIterator it;
300
JvmtiEnvBase* previous_env = NULL;
301
JvmtiEnvBase* env = it.first();
302
while (env != NULL) {
303
if (env->is_valid()) {
304
previous_env = env;
305
env = it.next(env);
306
} else {
307
// This one isn't valid, remove it from the list and deallocate it
308
JvmtiEnvBase* defunct_env = env;
309
env = it.next(env);
310
if (previous_env == NULL) {
311
_head_environment = env;
312
} else {
313
previous_env->set_next_environment(env);
314
}
315
delete defunct_env;
316
}
317
}
318
319
}
320
321
322
void
323
JvmtiEnvBase::check_for_periodic_clean_up() {
324
assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
325
326
class ThreadInsideIterationClosure: public ThreadClosure {
327
private:
328
bool _inside;
329
public:
330
ThreadInsideIterationClosure() : _inside(false) {}
331
332
void do_thread(Thread* thread) {
333
_inside |= thread->is_inside_jvmti_env_iteration();
334
}
335
336
bool is_inside_jvmti_env_iteration() {
337
return _inside;
338
}
339
};
340
341
if (_needs_clean_up) {
342
// Check if we are currently iterating over the environments;
343
// deallocation should not occur if we are
344
ThreadInsideIterationClosure tiic;
345
Threads::threads_do(&tiic);
346
if (!tiic.is_inside_jvmti_env_iteration() &&
347
!is_inside_dying_thread_env_iteration()) {
348
_needs_clean_up = false;
349
JvmtiEnvBase::periodic_clean_up();
350
}
351
}
352
}
353
354
355
void
356
JvmtiEnvBase::record_first_time_class_file_load_hook_enabled() {
357
assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(),
358
"sanity check");
359
360
if (!_class_file_load_hook_ever_enabled) {
361
_class_file_load_hook_ever_enabled = true;
362
363
if (get_capabilities()->can_retransform_classes) {
364
_is_retransformable = true;
365
} else {
366
_is_retransformable = false;
367
368
// cannot add retransform capability after ClassFileLoadHook has been enabled
369
get_prohibited_capabilities()->can_retransform_classes = 1;
370
}
371
}
372
}
373
374
375
void
376
JvmtiEnvBase::record_class_file_load_hook_enabled() {
377
if (!_class_file_load_hook_ever_enabled) {
378
if (Threads::number_of_threads() == 0) {
379
record_first_time_class_file_load_hook_enabled();
380
} else {
381
MutexLocker mu(JvmtiThreadState_lock);
382
record_first_time_class_file_load_hook_enabled();
383
}
384
}
385
}
386
387
388
jvmtiError
389
JvmtiEnvBase::set_native_method_prefixes(jint prefix_count, char** prefixes) {
390
assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(),
391
"sanity check");
392
393
int old_prefix_count = get_native_method_prefix_count();
394
char **old_prefixes = get_native_method_prefixes();
395
396
// allocate and install the new prefixes
397
if (prefix_count == 0 || !is_valid()) {
398
_native_method_prefix_count = 0;
399
_native_method_prefixes = NULL;
400
} else {
401
// there are prefixes, allocate an array to hold them, and fill it
402
char** new_prefixes = (char**)os::malloc((prefix_count) * sizeof(char*), mtInternal);
403
if (new_prefixes == NULL) {
404
return JVMTI_ERROR_OUT_OF_MEMORY;
405
}
406
for (int i = 0; i < prefix_count; i++) {
407
char* prefix = prefixes[i];
408
if (prefix == NULL) {
409
for (int j = 0; j < i; j++) {
410
os::free(new_prefixes[j]);
411
}
412
os::free(new_prefixes);
413
return JVMTI_ERROR_NULL_POINTER;
414
}
415
prefix = os::strdup(prefixes[i]);
416
if (prefix == NULL) {
417
for (int j = 0; j < i; j++) {
418
os::free(new_prefixes[j]);
419
}
420
os::free(new_prefixes);
421
return JVMTI_ERROR_OUT_OF_MEMORY;
422
}
423
new_prefixes[i] = prefix;
424
}
425
_native_method_prefix_count = prefix_count;
426
_native_method_prefixes = new_prefixes;
427
}
428
429
// now that we know the new prefixes have been successfully installed we can
430
// safely remove the old ones
431
if (old_prefix_count != 0) {
432
for (int i = 0; i < old_prefix_count; i++) {
433
os::free(old_prefixes[i]);
434
}
435
os::free(old_prefixes);
436
}
437
438
return JVMTI_ERROR_NONE;
439
}
440
441
442
// Collect all the prefixes which have been set in any JVM TI environments
443
// by the SetNativeMethodPrefix(es) functions. Be sure to maintain the
444
// order of environments and the order of prefixes within each environment.
445
// Return in a resource allocated array.
446
char**
447
JvmtiEnvBase::get_all_native_method_prefixes(int* count_ptr) {
448
assert(Threads::number_of_threads() == 0 ||
449
SafepointSynchronize::is_at_safepoint() ||
450
JvmtiThreadState_lock->is_locked(),
451
"sanity check");
452
453
int total_count = 0;
454
GrowableArray<char*>* prefix_array = new GrowableArray<char*>(5);
455
456
JvmtiEnvIterator it;
457
for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
458
int prefix_count = env->get_native_method_prefix_count();
459
char** prefixes = env->get_native_method_prefixes();
460
for (int j = 0; j < prefix_count; j++) {
461
// retrieve a prefix and, so that it is safe against asynchronous changes,
462
// copy it into the resource area
463
char* prefix = prefixes[j];
464
char* prefix_copy = NEW_RESOURCE_ARRAY(char, strlen(prefix)+1);
465
strcpy(prefix_copy, prefix);
466
prefix_array->at_put_grow(total_count++, prefix_copy);
467
}
468
}
469
470
char** all_prefixes = NEW_RESOURCE_ARRAY(char*, total_count);
471
char** p = all_prefixes;
472
for (int i = 0; i < total_count; ++i) {
473
*p++ = prefix_array->at(i);
474
}
475
*count_ptr = total_count;
476
return all_prefixes;
477
}
478
479
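// Install the agent-supplied event callback table. The stored table is cleared
// first and then copied up to the smaller of the agent's and the VM's struct sizes.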
void
480
JvmtiEnvBase::set_event_callbacks(const jvmtiEventCallbacks* callbacks,
481
jint size_of_callbacks) {
482
assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
483
484
size_t byte_cnt = sizeof(jvmtiEventCallbacks);
485
486
// clear in either case to be sure we cover any gap between sizes
487
memset(&_event_callbacks, 0, byte_cnt);
488
489
// Now that JvmtiThreadState_lock is held, prevent a possible race condition where events
490
// are re-enabled by a call to set event callbacks where the DisposeEnvironment
491
// occurs after the boiler-plate environment check and before the lock is acquired.
492
if (callbacks != NULL && is_valid()) {
493
if (size_of_callbacks < (jint)byte_cnt) {
494
byte_cnt = size_of_callbacks;
495
}
496
memcpy(&_event_callbacks, callbacks, byte_cnt);
497
}
498
}
499
500
501
// In the fullness of time, all users of this method should instead
502
// directly use allocate; besides being cleaner and faster, this will
503
// mean much better out-of-memory handling
504
unsigned char *
505
JvmtiEnvBase::jvmtiMalloc(jlong size) {
506
unsigned char* mem = NULL;
507
jvmtiError result = allocate(size, &mem);
508
assert(result == JVMTI_ERROR_NONE, "Allocate failed");
509
return mem;
510
}
511
512
513
// Handle management
514
515
jobject JvmtiEnvBase::jni_reference(Handle hndl) {
516
return JNIHandles::make_local(hndl());
517
}
518
519
jobject JvmtiEnvBase::jni_reference(JavaThread *thread, Handle hndl) {
520
return JNIHandles::make_local(thread, hndl());
521
}
522
523
void JvmtiEnvBase::destroy_jni_reference(jobject jobj) {
524
JNIHandles::destroy_local(jobj);
525
}
526
527
void JvmtiEnvBase::destroy_jni_reference(JavaThread *thread, jobject jobj) {
528
JNIHandles::destroy_local(jobj); // thread is unused.
529
}
530
531
//
532
// Threads
533
//
534
535
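// Allocate a JVMTI-managed array filled with JNI local references for the given
// handles; returns NULL for a zero length or a failed allocation.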
jobject *
536
JvmtiEnvBase::new_jobjectArray(int length, Handle *handles) {
537
if (length == 0) {
538
return NULL;
539
}
540
541
jobject *objArray = (jobject *) jvmtiMalloc(sizeof(jobject) * length);
542
NULL_CHECK(objArray, NULL);
543
544
for (int i = 0; i < length; i++) {
545
objArray[i] = jni_reference(handles[i]);
546
}
547
return objArray;
548
}
549
550
jthread *
551
JvmtiEnvBase::new_jthreadArray(int length, Handle *handles) {
552
return (jthread *) new_jobjectArray(length, handles);
553
}
554
555
jthreadGroup *
556
JvmtiEnvBase::new_jthreadGroupArray(int length, Handle *handles) {
557
return (jthreadGroup *) new_jobjectArray(length, handles);
558
}
559
560
// return the vframe on the specified thread and depth, NULL if no such frame
561
// The thread and the oops in the returned vframe might not have been processed.
562
vframe*
563
JvmtiEnvBase::vframeForNoProcess(JavaThread* java_thread, jint depth) {
564
if (!java_thread->has_last_Java_frame()) {
565
return NULL;
566
}
567
RegisterMap reg_map(java_thread, true /* update_map */, false /* process_frames */);
568
vframe *vf = java_thread->last_java_vframe(&reg_map);
569
int d = 0;
570
while ((vf != NULL) && (d < depth)) {
571
vf = vf->java_sender();
572
d++;
573
}
574
return vf;
575
}
576
577
578
//
579
// utilities: JNI objects
580
//
581
582
583
jclass
584
JvmtiEnvBase::get_jni_class_non_null(Klass* k) {
585
assert(k != NULL, "k != NULL");
586
Thread *thread = Thread::current();
587
return (jclass)jni_reference(Handle(thread, k->java_mirror()));
588
}
589
590
//
591
// Field Information
592
//
593
594
bool
595
JvmtiEnvBase::get_field_descriptor(Klass* k, jfieldID field, fieldDescriptor* fd) {
596
if (!jfieldIDWorkaround::is_valid_jfieldID(k, field)) {
597
return false;
598
}
599
bool found = false;
600
if (jfieldIDWorkaround::is_static_jfieldID(field)) {
601
JNIid* id = jfieldIDWorkaround::from_static_jfieldID(field);
602
found = id->find_local_field(fd);
603
} else {
604
// Non-static field. The fieldID is really the offset of the field within the object.
605
int offset = jfieldIDWorkaround::from_instance_jfieldID(k, field);
606
found = InstanceKlass::cast(k)->find_field_from_offset(offset, false, fd);
607
}
608
return found;
609
}
610
611
//
612
// Object Monitor Information
613
//
614
615
//
616
// Count the number of objects for a lightweight monitor. The hobj
617
// parameter is the object that owns the monitor, so this routine will
618
// count the number of times the same object was locked by frames
619
// in java_thread.
620
//
621
jint
622
JvmtiEnvBase::count_locked_objects(JavaThread *java_thread, Handle hobj) {
623
jint ret = 0;
624
if (!java_thread->has_last_Java_frame()) {
625
return ret; // no Java frames so no monitors
626
}
627
628
Thread* current_thread = Thread::current();
629
ResourceMark rm(current_thread);
630
HandleMark hm(current_thread);
631
RegisterMap reg_map(java_thread);
632
633
for (javaVFrame *jvf = java_thread->last_java_vframe(&reg_map); jvf != NULL;
634
jvf = jvf->java_sender()) {
635
GrowableArray<MonitorInfo*>* mons = jvf->monitors();
636
if (!mons->is_empty()) {
637
for (int i = 0; i < mons->length(); i++) {
638
MonitorInfo *mi = mons->at(i);
639
if (mi->owner_is_scalar_replaced()) continue;
640
641
// see if owner of the monitor is our object
642
if (mi->owner() != NULL && mi->owner() == hobj()) {
643
ret++;
644
}
645
}
646
}
647
}
648
return ret;
649
}
650
651
652
653
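// Report (via monitor_ptr) a JNI reference to the monitor object that java_thread
// is waiting on or trying to enter, or NULL if there is no contended monitor.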
jvmtiError
654
JvmtiEnvBase::get_current_contended_monitor(JavaThread *calling_thread, JavaThread *java_thread, jobject *monitor_ptr) {
655
Thread *current_thread = Thread::current();
656
assert(java_thread->is_handshake_safe_for(current_thread),
657
"call by myself or at handshake");
658
oop obj = NULL;
659
// The ObjectMonitor* can't be async deflated since we are either
660
// at a safepoint or the calling thread is operating on itself so
661
// it cannot leave the underlying wait()/enter() call.
662
ObjectMonitor *mon = java_thread->current_waiting_monitor();
663
if (mon == NULL) {
664
// thread is not doing an Object.wait() call
665
mon = java_thread->current_pending_monitor();
666
if (mon != NULL) {
667
// The thread is trying to enter() an ObjectMonitor.
668
obj = mon->object();
669
assert(obj != NULL, "ObjectMonitor should have a valid object!");
670
}
671
// implied else: no contended ObjectMonitor
672
} else {
673
// thread is doing an Object.wait() call
674
obj = mon->object();
675
assert(obj != NULL, "Object.wait() should have an object");
676
}
677
678
if (obj == NULL) {
679
*monitor_ptr = NULL;
680
} else {
681
HandleMark hm(current_thread);
682
Handle hobj(current_thread, obj);
683
*monitor_ptr = jni_reference(calling_thread, hobj);
684
}
685
return JVMTI_ERROR_NONE;
686
}
687
688
689
jvmtiError
690
JvmtiEnvBase::get_owned_monitors(JavaThread *calling_thread, JavaThread* java_thread,
691
GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list) {
692
// Note:
693
// calling_thread is the thread that requested the list of monitors for java_thread.
694
// java_thread is the thread owning the monitors.
695
// current_thread is the thread executing this code, can be a non-JavaThread (e.g. VM Thread).
696
// And they all may be different threads.
697
jvmtiError err = JVMTI_ERROR_NONE;
698
Thread *current_thread = Thread::current();
699
assert(java_thread->is_handshake_safe_for(current_thread),
700
"call by myself or at handshake");
701
702
if (java_thread->has_last_Java_frame()) {
703
ResourceMark rm(current_thread);
704
HandleMark hm(current_thread);
705
RegisterMap reg_map(java_thread);
706
707
int depth = 0;
708
for (javaVFrame *jvf = java_thread->last_java_vframe(&reg_map); jvf != NULL;
709
jvf = jvf->java_sender()) {
710
if (MaxJavaStackTraceDepth == 0 || depth++ < MaxJavaStackTraceDepth) { // check for stack too deep
711
// add locked objects for this frame into list
712
err = get_locked_objects_in_frame(calling_thread, java_thread, jvf, owned_monitors_list, depth-1);
713
if (err != JVMTI_ERROR_NONE) {
714
return err;
715
}
716
}
717
}
718
}
719
720
// Get off-stack monitors (e.g. acquired via JNI MonitorEnter).
721
JvmtiMonitorClosure jmc(java_thread, calling_thread, owned_monitors_list, this);
722
ObjectSynchronizer::monitors_iterate(&jmc);
723
err = jmc.error();
724
725
return err;
726
}
727
728
// Save JNI local handles for any objects that this frame owns.
729
jvmtiError
730
JvmtiEnvBase::get_locked_objects_in_frame(JavaThread* calling_thread, JavaThread* java_thread,
731
javaVFrame *jvf, GrowableArray<jvmtiMonitorStackDepthInfo*>* owned_monitors_list, jint stack_depth) {
732
jvmtiError err = JVMTI_ERROR_NONE;
733
Thread* current_thread = Thread::current();
734
ResourceMark rm(current_thread);
735
HandleMark hm(current_thread);
736
737
GrowableArray<MonitorInfo*>* mons = jvf->monitors();
738
if (mons->is_empty()) {
739
return err; // this javaVFrame holds no monitors
740
}
741
742
oop wait_obj = NULL;
743
{
744
// The ObjectMonitor* can't be async deflated since we are either
745
// at a safepoint or the calling thread is operating on itself so
746
// it cannot leave the underlying wait() call.
747
// Save object of current wait() call (if any) for later comparison.
748
ObjectMonitor *mon = java_thread->current_waiting_monitor();
749
if (mon != NULL) {
750
wait_obj = mon->object();
751
}
752
}
753
oop pending_obj = NULL;
754
{
755
// The ObjectMonitor* can't be async deflated since we are either
756
// at a safepoint or the calling thread is operating on itself so
757
// it cannot leave the underlying enter() call.
758
// Save object of current enter() call (if any) for later comparison.
759
ObjectMonitor *mon = java_thread->current_pending_monitor();
760
if (mon != NULL) {
761
pending_obj = mon->object();
762
}
763
}
764
765
for (int i = 0; i < mons->length(); i++) {
766
MonitorInfo *mi = mons->at(i);
767
768
if (mi->owner_is_scalar_replaced()) continue;
769
770
oop obj = mi->owner();
771
if (obj == NULL) {
772
// this monitor doesn't have an owning object so skip it
773
continue;
774
}
775
776
if (wait_obj == obj) {
777
// the thread is waiting on this monitor so it isn't really owned
778
continue;
779
}
780
781
if (pending_obj == obj) {
782
// the thread is pending on this monitor so it isn't really owned
783
continue;
784
}
785
786
if (owned_monitors_list->length() > 0) {
787
// Our list has at least one object on it so we have to check
788
// for recursive object locking
789
bool found = false;
790
for (int j = 0; j < owned_monitors_list->length(); j++) {
791
jobject jobj = ((jvmtiMonitorStackDepthInfo*)owned_monitors_list->at(j))->monitor;
792
oop check = JNIHandles::resolve(jobj);
793
if (check == obj) {
794
found = true; // we found the object
795
break;
796
}
797
}
798
799
if (found) {
800
// already have this object so don't include it
801
continue;
802
}
803
}
804
805
// add the owning object to our list
806
jvmtiMonitorStackDepthInfo *jmsdi;
807
err = allocate(sizeof(jvmtiMonitorStackDepthInfo), (unsigned char **)&jmsdi);
808
if (err != JVMTI_ERROR_NONE) {
809
return err;
810
}
811
Handle hobj(Thread::current(), obj);
812
jmsdi->monitor = jni_reference(calling_thread, hobj);
813
jmsdi->stack_depth = stack_depth;
814
owned_monitors_list->append(jmsdi);
815
}
816
817
return err;
818
}
819
820
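// Fill frame_buffer with up to max_count frames of java_thread's stack, starting
// at start_depth; a negative start_depth counts from the oldest frame.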
jvmtiError
821
JvmtiEnvBase::get_stack_trace(JavaThread *java_thread,
822
jint start_depth, jint max_count,
823
jvmtiFrameInfo* frame_buffer, jint* count_ptr) {
824
#ifdef ASSERT
825
uint32_t debug_bits = 0;
826
#endif
827
Thread *current_thread = Thread::current();
828
assert(SafepointSynchronize::is_at_safepoint() ||
829
java_thread->is_handshake_safe_for(current_thread),
830
"call by myself / at safepoint / at handshake");
831
int count = 0;
832
if (java_thread->has_last_Java_frame()) {
833
RegisterMap reg_map(java_thread);
834
ResourceMark rm(current_thread);
835
javaVFrame *jvf = java_thread->last_java_vframe(&reg_map);
836
HandleMark hm(current_thread);
837
if (start_depth != 0) {
838
if (start_depth > 0) {
839
for (int j = 0; j < start_depth && jvf != NULL; j++) {
840
jvf = jvf->java_sender();
841
}
842
if (jvf == NULL) {
843
// start_depth is deeper than the stack depth
844
return JVMTI_ERROR_ILLEGAL_ARGUMENT;
845
}
846
} else { // start_depth < 0
847
// we are referencing the starting depth based on the oldest
848
// part of the stack.
849
// optimize to limit the number of times that java_sender() is called
850
javaVFrame *jvf_cursor = jvf;
851
javaVFrame *jvf_prev = NULL;
852
javaVFrame *jvf_prev_prev = NULL;
853
int j = 0;
854
while (jvf_cursor != NULL) {
855
jvf_prev_prev = jvf_prev;
856
jvf_prev = jvf_cursor;
857
for (j = 0; j > start_depth && jvf_cursor != NULL; j--) {
858
jvf_cursor = jvf_cursor->java_sender();
859
}
860
}
861
if (j == start_depth) {
862
// previous pointer is exactly where we want to start
863
jvf = jvf_prev;
864
} else {
865
// we need to back up further to get to the right place
866
if (jvf_prev_prev == NULL) {
867
// the -start_depth is greater than the stack depth
868
return JVMTI_ERROR_ILLEGAL_ARGUMENT;
869
}
870
// j now is the number of frames on the stack starting with
871
// jvf_prev; we start from jvf_prev_prev and move that many
872
// frames toward the older end of the stack, so that -start_depth
873
// frames remain.
874
jvf = jvf_prev_prev;
875
for (; j < 0; j++) {
876
jvf = jvf->java_sender();
877
}
878
}
879
}
880
}
881
for (; count < max_count && jvf != NULL; count++) {
882
frame_buffer[count].method = jvf->method()->jmethod_id();
883
frame_buffer[count].location = (jvf->method()->is_native() ? -1 : jvf->bci());
884
jvf = jvf->java_sender();
885
}
886
} else {
887
if (start_depth != 0) {
888
// no frames and there is a starting depth
889
return JVMTI_ERROR_ILLEGAL_ARGUMENT;
890
}
891
}
892
*count_ptr = count;
893
return JVMTI_ERROR_NONE;
894
}
895
896
jvmtiError
897
JvmtiEnvBase::get_frame_count(JvmtiThreadState *state, jint *count_ptr) {
898
assert((state != NULL),
899
"JavaThread should create JvmtiThreadState before calling this method");
900
*count_ptr = state->count_frames();
901
return JVMTI_ERROR_NONE;
902
}
903
904
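// Report the method and bytecode index (-1 for a native method) of the frame at
// the given depth in java_thread's stack.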
jvmtiError
905
JvmtiEnvBase::get_frame_location(JavaThread *java_thread, jint depth,
906
jmethodID* method_ptr, jlocation* location_ptr) {
907
#ifdef ASSERT
908
uint32_t debug_bits = 0;
909
#endif
910
Thread* current_thread = Thread::current();
911
assert(java_thread->is_handshake_safe_for(current_thread),
912
"call by myself or at handshake");
913
ResourceMark rm(current_thread);
914
915
vframe *vf = vframeForNoProcess(java_thread, depth);
916
if (vf == NULL) {
917
return JVMTI_ERROR_NO_MORE_FRAMES;
918
}
919
920
// vframeFor should return a java frame. If it doesn't
921
// it means we've got an internal error and we return the
922
// error in product mode. In debug mode we will instead
923
// attempt to cast the vframe to a javaVFrame and will
924
// cause an assertion/crash to allow further diagnosis.
925
#ifdef PRODUCT
926
if (!vf->is_java_frame()) {
927
return JVMTI_ERROR_INTERNAL;
928
}
929
#endif
930
931
HandleMark hm(current_thread);
932
javaVFrame *jvf = javaVFrame::cast(vf);
933
Method* method = jvf->method();
934
if (method->is_native()) {
935
*location_ptr = -1;
936
} else {
937
*location_ptr = jvf->bci();
938
}
939
*method_ptr = method->jmethod_id();
940
941
return JVMTI_ERROR_NONE;
942
}
943
944
945
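// Fill in jvmtiMonitorUsage for the given object: the owning thread, entry count,
// and the lists of contending and waiting threads. Runs in the VM thread at a safepoint.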
jvmtiError
946
JvmtiEnvBase::get_object_monitor_usage(JavaThread* calling_thread, jobject object, jvmtiMonitorUsage* info_ptr) {
947
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
948
Thread* current_thread = VMThread::vm_thread();
949
assert(current_thread == Thread::current(), "must be");
950
951
HandleMark hm(current_thread);
952
Handle hobj;
953
954
// Check arguments
955
{
956
oop mirror = JNIHandles::resolve_external_guard(object);
957
NULL_CHECK(mirror, JVMTI_ERROR_INVALID_OBJECT);
958
NULL_CHECK(info_ptr, JVMTI_ERROR_NULL_POINTER);
959
960
hobj = Handle(current_thread, mirror);
961
}
962
963
ThreadsListHandle tlh(current_thread);
964
JavaThread *owning_thread = NULL;
965
ObjectMonitor *mon = NULL;
966
jvmtiMonitorUsage ret = {
967
NULL, 0, 0, NULL, 0, NULL
968
};
969
970
uint32_t debug_bits = 0;
971
// first derive the object's owner and entry_count (if any)
972
{
973
// Revoke any biases before querying the mark word
974
BiasedLocking::revoke_at_safepoint(hobj);
975
976
address owner = NULL;
977
{
978
markWord mark = hobj()->mark();
979
980
if (!mark.has_monitor()) {
981
// this object has a lightweight monitor
982
983
if (mark.has_locker()) {
984
owner = (address)mark.locker(); // save the address of the Lock word
985
}
986
// implied else: no owner
987
} else {
988
// this object has a heavyweight monitor
989
mon = mark.monitor();
990
991
// The owner field of a heavyweight monitor may be NULL for no
992
// owner, a JavaThread * or it may still be the address of the
993
// Lock word in a JavaThread's stack. A monitor can be inflated
994
// by a non-owning JavaThread, but only the owning JavaThread
995
// can change the owner field from the Lock word to the
996
// JavaThread * and it may not have done that yet.
997
owner = (address)mon->owner();
998
}
999
}
1000
1001
if (owner != NULL) {
1002
// This monitor is owned so we have to find the owning JavaThread.
1003
owning_thread = Threads::owning_thread_from_monitor_owner(tlh.list(), owner);
1004
assert(owning_thread != NULL, "owning JavaThread must not be NULL");
1005
Handle th(current_thread, owning_thread->threadObj());
1006
ret.owner = (jthread)jni_reference(calling_thread, th);
1007
}
1008
1009
if (owning_thread != NULL) { // monitor is owned
1010
// The recursions field of a monitor does not reflect recursions
1011
// because lightweight locks taken before the monitor was inflated are not included.
1012
// We have to count the number of recursive monitor entries the hard way.
1013
// We pass a handle to survive any GCs along the way.
1014
ret.entry_count = count_locked_objects(owning_thread, hobj);
1015
}
1016
// implied else: entry_count == 0
1017
}
1018
1019
jint nWant = 0, nWait = 0;
1020
if (mon != NULL) {
1021
// this object has a heavyweight monitor
1022
nWant = mon->contentions(); // # of threads contending for monitor
1023
nWait = mon->waiters(); // # of threads in Object.wait()
1024
ret.waiter_count = nWant + nWait;
1025
ret.notify_waiter_count = nWait;
1026
} else {
1027
// this object has a lightweight monitor
1028
ret.waiter_count = 0;
1029
ret.notify_waiter_count = 0;
1030
}
1031
1032
// Allocate memory for heavyweight and lightweight monitor.
1033
jvmtiError err;
1034
err = allocate(ret.waiter_count * sizeof(jthread *), (unsigned char**)&ret.waiters);
1035
if (err != JVMTI_ERROR_NONE) {
1036
return err;
1037
}
1038
err = allocate(ret.notify_waiter_count * sizeof(jthread *),
1039
(unsigned char**)&ret.notify_waiters);
1040
if (err != JVMTI_ERROR_NONE) {
1041
deallocate((unsigned char*)ret.waiters);
1042
return err;
1043
}
1044
1045
// now derive the rest of the fields
1046
if (mon != NULL) {
1047
// this object has a heavyweight monitor
1048
1049
// Number of waiters may actually be less than the waiter count.
1050
// So NULL out memory so that unused memory will be NULL.
1051
memset(ret.waiters, 0, ret.waiter_count * sizeof(jthread *));
1052
memset(ret.notify_waiters, 0, ret.notify_waiter_count * sizeof(jthread *));
1053
1054
if (ret.waiter_count > 0) {
1055
// we have contending and/or waiting threads
1056
if (nWant > 0) {
1057
// we have contending threads
1058
ResourceMark rm(current_thread);
1059
// get_pending_threads returns only Java threads so we do not need to
1060
// check for non-Java threads.
1061
GrowableArray<JavaThread*>* wantList = Threads::get_pending_threads(tlh.list(), nWant, (address)mon);
1062
if (wantList->length() < nWant) {
1063
// robustness: the pending list has gotten smaller
1064
nWant = wantList->length();
1065
}
1066
for (int i = 0; i < nWant; i++) {
1067
JavaThread *pending_thread = wantList->at(i);
1068
Handle th(current_thread, pending_thread->threadObj());
1069
ret.waiters[i] = (jthread)jni_reference(calling_thread, th);
1070
}
1071
}
1072
if (nWait > 0) {
1073
// we have threads in Object.wait()
1074
int offset = nWant; // add after any contending threads
1075
ObjectWaiter *waiter = mon->first_waiter();
1076
for (int i = 0, j = 0; i < nWait; i++) {
1077
if (waiter == NULL) {
1078
// robustness: the waiting list has gotten smaller
1079
nWait = j;
1080
break;
1081
}
1082
JavaThread *w = mon->thread_of_waiter(waiter);
1083
if (w != NULL) {
1084
// If the thread was found on the ObjectWaiter list, then
1085
// it has not been notified. This thread can't change the
1086
// state of the monitor so it doesn't need to be suspended.
1087
Handle th(current_thread, w->threadObj());
1088
ret.waiters[offset + j] = (jthread)jni_reference(calling_thread, th);
1089
ret.notify_waiters[j++] = (jthread)jni_reference(calling_thread, th);
1090
}
1091
waiter = mon->next_waiter(waiter);
1092
}
1093
}
1094
} // ThreadsListHandle is destroyed here.
1095
1096
// Adjust count. nWant and nWait count values may be less than original.
1097
ret.waiter_count = nWant + nWait;
1098
ret.notify_waiter_count = nWait;
1099
} else {
1100
// this object has a lightweight monitor and we have nothing more
1101
// to do here because the defaults are just fine.
1102
}
1103
1104
// we don't update return parameter unless everything worked
1105
*info_ptr = ret;
1106
1107
return JVMTI_ERROR_NONE;
1108
}
1109
1110
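// ResourceTracker records JVMTI allocations made on behalf of a single request so
// that they can all be released if the request fails part way through.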
ResourceTracker::ResourceTracker(JvmtiEnv* env) {
1111
_env = env;
1112
_allocations = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<unsigned char*>(20, mtServiceability);
1113
_failed = false;
1114
}
1115
ResourceTracker::~ResourceTracker() {
1116
if (_failed) {
1117
for (int i = 0; i < _allocations->length(); i++) {
1118
_env->deallocate(_allocations->at(i));
1119
}
1120
}
1121
delete _allocations;
1122
}
1123
1124
jvmtiError ResourceTracker::allocate(jlong size, unsigned char** mem_ptr) {
1125
unsigned char *ptr;
1126
jvmtiError err = _env->allocate(size, &ptr);
1127
if (err == JVMTI_ERROR_NONE) {
1128
_allocations->append(ptr);
1129
*mem_ptr = ptr;
1130
} else {
1131
*mem_ptr = NULL;
1132
_failed = true;
1133
}
1134
return err;
1135
}
1136
1137
unsigned char* ResourceTracker::allocate(jlong size) {
1138
unsigned char* ptr;
1139
allocate(size, &ptr);
1140
return ptr;
1141
}
1142
1143
char* ResourceTracker::strdup(const char* str) {
1144
char *dup_str = (char*)allocate(strlen(str)+1);
1145
if (dup_str != NULL) {
1146
strcpy(dup_str, str);
1147
}
1148
return dup_str;
1149
}
1150
1151
struct StackInfoNode {
1152
struct StackInfoNode *next;
1153
jvmtiStackInfo info;
1154
};
1155
1156
// Create a jvmtiStackInfo inside a linked list node and create a
1157
// buffer for the frame information, both allocated as resource objects.
1158
// Fill in both the jvmtiStackInfo and the jvmtiFrameInfo.
1159
// Note that either or both of thr and thread_oop
1160
// may be null if the thread is new or has exited.
1161
void
1162
MultipleStackTracesCollector::fill_frames(jthread jt, JavaThread *thr, oop thread_oop) {
1163
#ifdef ASSERT
1164
Thread *current_thread = Thread::current();
1165
assert(SafepointSynchronize::is_at_safepoint() ||
1166
thr->is_handshake_safe_for(current_thread),
1167
"call by myself / at safepoint / at handshake");
1168
#endif
1169
1170
jint state = 0;
1171
struct StackInfoNode *node = NEW_RESOURCE_OBJ(struct StackInfoNode);
1172
jvmtiStackInfo *infop = &(node->info);
1173
node->next = head();
1174
set_head(node);
1175
infop->frame_count = 0;
1176
infop->thread = jt;
1177
1178
if (thread_oop != NULL) {
1179
// get most state bits
1180
state = (jint)java_lang_Thread::get_thread_status(thread_oop);
1181
}
1182
1183
if (thr != NULL) { // add more state bits if there is a JavaThread to query
1184
if (thr->is_suspended()) {
1185
state |= JVMTI_THREAD_STATE_SUSPENDED;
1186
}
1187
JavaThreadState jts = thr->thread_state();
1188
if (jts == _thread_in_native) {
1189
state |= JVMTI_THREAD_STATE_IN_NATIVE;
1190
}
1191
if (thr->is_interrupted(false)) {
1192
state |= JVMTI_THREAD_STATE_INTERRUPTED;
1193
}
1194
}
1195
infop->state = state;
1196
1197
if (thr != NULL && (state & JVMTI_THREAD_STATE_ALIVE) != 0) {
1198
infop->frame_buffer = NEW_RESOURCE_ARRAY(jvmtiFrameInfo, max_frame_count());
1199
env()->get_stack_trace(thr, 0, max_frame_count(),
1200
infop->frame_buffer, &(infop->frame_count));
1201
} else {
1202
infop->frame_buffer = NULL;
1203
infop->frame_count = 0;
1204
}
1205
_frame_count_total += infop->frame_count;
1206
}
1207
1208
// Based on the stack information in the linked list, allocate a memory
1209
// block to return and fill it from the info in the linked list.
1210
void
1211
MultipleStackTracesCollector::allocate_and_fill_stacks(jint thread_count) {
1212
// do I need to worry about alignment issues?
1213
jlong alloc_size = thread_count * sizeof(jvmtiStackInfo)
1214
+ _frame_count_total * sizeof(jvmtiFrameInfo);
1215
env()->allocate(alloc_size, (unsigned char **)&_stack_info);
1216
1217
// pointers to move through the newly allocated space as it is filled in
1218
jvmtiStackInfo *si = _stack_info + thread_count; // bottom of stack info
1219
jvmtiFrameInfo *fi = (jvmtiFrameInfo *)si; // is the top of frame info
1220
1221
// copy information in resource area into allocated buffer
1222
// insert stack info backwards since linked list is backwards
1223
// insert frame info forwards
1224
// walk the StackInfoNodes
1225
for (struct StackInfoNode *sin = head(); sin != NULL; sin = sin->next) {
1226
jint frame_count = sin->info.frame_count;
1227
size_t frames_size = frame_count * sizeof(jvmtiFrameInfo);
1228
--si;
1229
memcpy(si, &(sin->info), sizeof(jvmtiStackInfo));
1230
if (frames_size == 0) {
1231
si->frame_buffer = NULL;
1232
} else {
1233
memcpy(fi, sin->info.frame_buffer, frames_size);
1234
si->frame_buffer = fi; // point to the newly allocated copy of the frames
1235
fi += frame_count;
1236
}
1237
}
1238
assert(si == _stack_info, "the last copied stack info must be the first record");
1239
assert((unsigned char *)fi == ((unsigned char *)_stack_info) + alloc_size,
1240
"the last copied frame info must be the last record");
1241
}
1242
1243
1244
void
1245
VM_GetThreadListStackTraces::doit() {
1246
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1247
1248
ResourceMark rm;
1249
ThreadsListHandle tlh;
1250
for (int i = 0; i < _thread_count; ++i) {
1251
jthread jt = _thread_list[i];
1252
JavaThread* java_thread = NULL;
1253
oop thread_oop = NULL;
1254
jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), jt, &java_thread, &thread_oop);
1255
if (err != JVMTI_ERROR_NONE) {
1256
// We got an error code so we don't have a JavaThread *, but
1257
// only return an error from here if we didn't get a valid
1258
// thread_oop.
1259
if (thread_oop == NULL) {
1260
_collector.set_result(err);
1261
return;
1262
}
1263
// We have a valid thread_oop.
1264
}
1265
_collector.fill_frames(jt, java_thread, thread_oop);
1266
}
1267
_collector.allocate_and_fill_stacks(_thread_count);
1268
}
1269
1270
void
1271
GetSingleStackTraceClosure::do_thread(Thread *target) {
1272
JavaThread *jt = target->as_Java_thread();
1273
oop thread_oop = jt->threadObj();
1274
1275
if (!jt->is_exiting() && thread_oop != NULL) {
1276
ResourceMark rm;
1277
_collector.fill_frames(_jthread, jt, thread_oop);
1278
_collector.allocate_and_fill_stacks(1);
1279
}
1280
}
1281
1282
void
1283
VM_GetAllStackTraces::doit() {
1284
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1285
1286
ResourceMark rm;
1287
_final_thread_count = 0;
1288
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
1289
oop thread_oop = jt->threadObj();
1290
if (thread_oop != NULL &&
1291
!jt->is_exiting() &&
1292
java_lang_Thread::is_alive(thread_oop) &&
1293
!jt->is_hidden_from_external_view()) {
1294
++_final_thread_count;
1295
// Handle block of the calling thread is used to create local refs.
1296
_collector.fill_frames((jthread)JNIHandles::make_local(_calling_thread, thread_oop),
1297
jt, thread_oop);
1298
}
1299
}
1300
_collector.allocate_and_fill_stacks(_final_thread_count);
1301
}
1302
1303
// Verifies that the top frame is a java frame in an expected state.
1304
// Deoptimizes frame if needed.
1305
// Checks that the frame method signature matches the return type (tos).
1306
// The HandleMark must be defined in the caller only,
1308
// to keep the ret_ob_h handle alive after returning to the caller.
1308
jvmtiError
1309
JvmtiEnvBase::check_top_frame(Thread* current_thread, JavaThread* java_thread,
1310
jvalue value, TosState tos, Handle* ret_ob_h) {
1311
ResourceMark rm(current_thread);
1312
1313
vframe *vf = vframeForNoProcess(java_thread, 0);
1314
NULL_CHECK(vf, JVMTI_ERROR_NO_MORE_FRAMES);
1315
1316
javaVFrame *jvf = (javaVFrame*) vf;
1317
if (!vf->is_java_frame() || jvf->method()->is_native()) {
1318
return JVMTI_ERROR_OPAQUE_FRAME;
1319
}
1320
1321
// If the frame is a compiled one, need to deoptimize it.
1322
if (vf->is_compiled_frame()) {
1323
if (!vf->fr().can_be_deoptimized()) {
1324
return JVMTI_ERROR_OPAQUE_FRAME;
1325
}
1326
Deoptimization::deoptimize_frame(java_thread, jvf->fr().id());
1327
}
1328
1329
// Get information about method return type
1330
Symbol* signature = jvf->method()->signature();
1331
1332
ResultTypeFinder rtf(signature);
1333
TosState fr_tos = as_TosState(rtf.type());
1334
if (fr_tos != tos) {
1335
if (tos != itos || (fr_tos != btos && fr_tos != ztos && fr_tos != ctos && fr_tos != stos)) {
1336
return JVMTI_ERROR_TYPE_MISMATCH;
1337
}
1338
}
1339
1340
// Check that the jobject class matches the return type signature.
1341
jobject jobj = value.l;
1342
if (tos == atos && jobj != NULL) { // NULL reference is allowed
1343
Handle ob_h(current_thread, JNIHandles::resolve_external_guard(jobj));
1344
NULL_CHECK(ob_h, JVMTI_ERROR_INVALID_OBJECT);
1345
Klass* ob_k = ob_h()->klass();
1346
NULL_CHECK(ob_k, JVMTI_ERROR_INVALID_OBJECT);
1347
1348
// Method return type signature.
1349
char* ty_sign = 1 + strchr(signature->as_C_string(), JVM_SIGNATURE_ENDFUNC);
1350
1351
if (!VM_GetOrSetLocal::is_assignable(ty_sign, ob_k, current_thread)) {
1352
return JVMTI_ERROR_TYPE_MISMATCH;
1353
}
1354
*ret_ob_h = ob_h;
1355
}
1356
return JVMTI_ERROR_NONE;
1357
} /* end check_top_frame */
1358
1359
1360
// ForceEarlyReturn<type> follows the PopFrame approach in many respects.
1361
// The main difference is in the last stage in the interpreter:
1362
// PopFrame stops method execution so that execution continues
1363
// from the same method call instruction, whereas
1364
// ForceEarlyReturn forces a return from the method, so execution
1365
// continues at the bytecode following the method call.
1366
1367
// java_thread - protected by ThreadsListHandle and pre-checked
1368
1369
jvmtiError
1370
JvmtiEnvBase::force_early_return(JavaThread* java_thread, jvalue value, TosState tos) {
1371
// retrieve or create the state
1372
JvmtiThreadState* state = JvmtiThreadState::state_for(java_thread);
1373
if (state == NULL) {
1374
return JVMTI_ERROR_THREAD_NOT_ALIVE;
1375
}
1376
1377
// Eagerly reallocate scalar replaced objects.
1378
JavaThread* current_thread = JavaThread::current();
1379
EscapeBarrier eb(true, current_thread, java_thread);
1380
if (!eb.deoptimize_objects(0)) {
1381
// Reallocation of scalar replaced objects failed -> return with error
1382
return JVMTI_ERROR_OUT_OF_MEMORY;
1383
}
1384
1385
SetForceEarlyReturn op(state, value, tos);
1386
if (java_thread == current_thread) {
1387
op.doit(java_thread, true /* self */);
1388
} else {
1389
Handshake::execute(&op, java_thread);
1390
}
1391
return op.result();
1392
}
1393
1394
void
1395
SetForceEarlyReturn::doit(Thread *target, bool self) {
1396
JavaThread* java_thread = target->as_Java_thread();
1397
Thread* current_thread = Thread::current();
1398
HandleMark hm(current_thread);
1399
1400
if (!self) {
1401
if (!java_thread->is_suspended()) {
1402
_result = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
1403
return;
1404
}
1405
}
1406
1407
// Check to see if a ForceEarlyReturn was already in progress
1408
if (_state->is_earlyret_pending()) {
1409
// Probably possible for JVMTI clients to trigger this, but the
1410
// JPDA backend shouldn't allow this to happen
1411
_result = JVMTI_ERROR_INTERNAL;
1412
return;
1413
}
1414
{
1415
// The same as for PopFrame. Workaround bug:
1416
// 4812902: popFrame hangs if the method is waiting at a synchronize
1417
// Catch this condition and return an error to avoid hanging.
1418
// Now JVMTI spec allows an implementation to bail out with an opaque
1419
// frame error.
1420
OSThread* osThread = java_thread->osthread();
1421
if (osThread->get_state() == MONITOR_WAIT) {
1422
_result = JVMTI_ERROR_OPAQUE_FRAME;
1423
return;
1424
}
1425
}
1426
1427
Handle ret_ob_h;
1428
_result = JvmtiEnvBase::check_top_frame(current_thread, java_thread, _value, _tos, &ret_ob_h);
1429
if (_result != JVMTI_ERROR_NONE) {
1430
return;
1431
}
1432
assert(_tos != atos || _value.l == NULL || ret_ob_h() != NULL,
1433
"return object oop must not be NULL if jobject is not NULL");
1434
1435
// Update the thread state to reflect that the top frame must be
1436
// forced to return.
1437
// The current frame will be returned later when the suspended
1438
// thread is resumed and right before returning from VM to Java.
1439
// (see call_VM_base() in assembler_<cpu>.cpp).
1440
1441
_state->set_earlyret_pending();
1442
_state->set_earlyret_oop(ret_ob_h());
1443
_state->set_earlyret_value(_value, _tos);
1444
1445
// Set pending step flag for this early return.
1446
// It is cleared when next step event is posted.
1447
_state->set_pending_step_for_earlyret();
1448
}
1449
1450
void
1451
JvmtiMonitorClosure::do_monitor(ObjectMonitor* mon) {
1452
if (_error != JVMTI_ERROR_NONE) {
1453
// Error occurred in previous iteration so no need to add
1454
// to the list.
1455
return;
1456
}
1457
if (mon->owner() == _java_thread) {
1458
// Filter out on-stack monitors collected during the stack walk.
1459
oop obj = mon->object();
1460
bool found = false;
1461
for (int j = 0; j < _owned_monitors_list->length(); j++) {
1462
jobject jobj = ((jvmtiMonitorStackDepthInfo*)_owned_monitors_list->at(j))->monitor;
1463
oop check = JNIHandles::resolve(jobj);
1464
if (check == obj) {
1465
// On stack monitor already collected during the stack walk.
1466
found = true;
1467
break;
1468
}
1469
}
1470
if (found == false) {
1471
// This is an off-stack monitor (e.g. acquired via JNI MonitorEnter).
1472
jvmtiError err;
1473
jvmtiMonitorStackDepthInfo *jmsdi;
1474
err = _env->allocate(sizeof(jvmtiMonitorStackDepthInfo), (unsigned char **)&jmsdi);
1475
if (err != JVMTI_ERROR_NONE) {
1476
_error = err;
1477
return;
1478
}
1479
Handle hobj(Thread::current(), obj);
1480
jmsdi->monitor = _env->jni_reference(_calling_thread, hobj);
1481
// stack depth is unknown for this monitor.
1482
jmsdi->stack_depth = -1;
1483
_owned_monitors_list->append(jmsdi);
1484
}
1485
}
1486
}
1487
1488
GrowableArray<OopHandle>* JvmtiModuleClosure::_tbl = NULL;
1489
1490
void JvmtiModuleClosure::do_module(ModuleEntry* entry) {
1491
assert_locked_or_safepoint(Module_lock);
1492
OopHandle module = entry->module_handle();
1493
guarantee(module.resolve() != NULL, "module object is NULL");
1494
_tbl->push(module);
1495
}
1496
1497
jvmtiError
1498
JvmtiModuleClosure::get_all_modules(JvmtiEnv* env, jint* module_count_ptr, jobject** modules_ptr) {
1499
ResourceMark rm;
1500
MutexLocker mcld(ClassLoaderDataGraph_lock);
1501
MutexLocker ml(Module_lock);
1502
1503
_tbl = new GrowableArray<OopHandle>(77);
1504
if (_tbl == NULL) {
1505
return JVMTI_ERROR_OUT_OF_MEMORY;
1506
}
1507
1508
// Iterate over all the modules loaded to the system.
1509
ClassLoaderDataGraph::modules_do(&do_module);
1510
1511
jint len = _tbl->length();
1512
guarantee(len > 0, "at least one module must be present");
1513
1514
jobject* array = (jobject*)env->jvmtiMalloc((jlong)(len * sizeof(jobject)));
1515
if (array == NULL) {
1516
return JVMTI_ERROR_OUT_OF_MEMORY;
1517
}
1518
for (jint idx = 0; idx < len; idx++) {
1519
array[idx] = JNIHandles::make_local(Thread::current(), _tbl->at(idx).resolve());
1520
}
1521
_tbl = NULL;
1522
*modules_ptr = array;
1523
*module_count_ptr = len;
1524
return JVMTI_ERROR_NONE;
1525
}
1526
1527
void
1528
UpdateForPopTopFrameClosure::doit(Thread *target, bool self) {
1529
Thread* current_thread = Thread::current();
1530
HandleMark hm(current_thread);
1531
JavaThread* java_thread = target->as_Java_thread();
1532
assert(java_thread == _state->get_thread(), "Must be");
1533
1534
if (!self && !java_thread->is_suspended()) {
1535
_result = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
1536
return;
1537
}
1538
1539
// Check to see if a PopFrame was already in progress
1540
if (java_thread->popframe_condition() != JavaThread::popframe_inactive) {
1541
// Probably possible for JVMTI clients to trigger this, but the
1542
// JPDA backend shouldn't allow this to happen
1543
_result = JVMTI_ERROR_INTERNAL;
1544
return;
1545
}
1546
1547
// Was workaround bug
1548
// 4812902: popFrame hangs if the method is waiting at a synchronize
1549
// Catch this condition and return an error to avoid hanging.
1550
// Now JVMTI spec allows an implementation to bail out with an opaque frame error.
1551
OSThread* osThread = java_thread->osthread();
1552
if (osThread->get_state() == MONITOR_WAIT) {
1553
_result = JVMTI_ERROR_OPAQUE_FRAME;
1554
return;
1555
}
1556
1557
ResourceMark rm(current_thread);
1558
// Check if there is more than one Java frame in this thread, that the top two frames
1559
// are Java (not native) frames, and that there is no intervening VM frame
1560
int frame_count = 0;
1561
bool is_interpreted[2];
1562
intptr_t *frame_sp[2];
1563
// The second arg of the constructor is needed to stop iterating at the java entry frame.
1564
for (vframeStream vfs(java_thread, true, false /* process_frames */); !vfs.at_end(); vfs.next()) {
1565
methodHandle mh(current_thread, vfs.method());
1566
if (mh->is_native()) {
1567
_result = JVMTI_ERROR_OPAQUE_FRAME;
1568
return;
1569
}
1570
is_interpreted[frame_count] = vfs.is_interpreted_frame();
1571
frame_sp[frame_count] = vfs.frame_id();
1572
if (++frame_count > 1) break;
1573
}
1574
if (frame_count < 2) {
1575
// We haven't found two adjacent non-native Java frames on the top.
1576
// There can be two situations here:
1577
// 1. There are no more java frames
1578
// 2. Two top java frames are separated by non-java native frames
1579
if (JvmtiEnvBase::vframeForNoProcess(java_thread, 1) == NULL) {
1580
_result = JVMTI_ERROR_NO_MORE_FRAMES;
1581
return;
1582
} else {
1583
// Intervening non-java native or VM frames separate java frames.
1584
// Current implementation does not support this. See bug #5031735.
1585
// In theory it is possible to pop frames in such cases.
1586
_result = JVMTI_ERROR_OPAQUE_FRAME;
1587
return;
1588
}
1589
}
1590
1591
// If any of the top 2 frames is a compiled one, need to deoptimize it
1592
for (int i = 0; i < 2; i++) {
1593
if (!is_interpreted[i]) {
1594
Deoptimization::deoptimize_frame(java_thread, frame_sp[i]);
1595
}
1596
}
1597
1598
// Update the thread state to reflect that the top frame is popped
1599
// so that cur_stack_depth is maintained properly and all frameIDs
1600
// are invalidated.
1601
// The current frame will be popped later when the suspended thread
1602
// is resumed and right before returning from VM to Java.
1603
// (see call_VM_base() in assembler_<cpu>.cpp).
1604
1605
// It's fine to update the thread state here because no JVMTI events
1606
// shall be posted for this PopFrame.
1607
1608
if (!java_thread->is_exiting() && java_thread->threadObj() != NULL) {
1609
_state->update_for_pop_top_frame();
1610
java_thread->set_popframe_condition(JavaThread::popframe_pending_bit);
1611
// Set the pending step flag for this popframe; it is cleared when the next
1612
// step event is posted.
1613
_state->set_pending_step_for_popframe();
1614
_result = JVMTI_ERROR_NONE;
1615
}
1616
}
1617
1618
void
1619
SetFramePopClosure::doit(Thread *target, bool self) {
1620
ResourceMark rm;
1621
JavaThread* java_thread = target->as_Java_thread();
1622
1623
assert(_state->get_thread() == java_thread, "Must be");
1624
1625
if (!self && !java_thread->is_suspended()) {
1626
_result = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
1627
return;
1628
}
1629
1630
vframe *vf = JvmtiEnvBase::vframeForNoProcess(java_thread, _depth);
1631
if (vf == NULL) {
1632
_result = JVMTI_ERROR_NO_MORE_FRAMES;
1633
return;
1634
}
1635
1636
if (!vf->is_java_frame() || ((javaVFrame*) vf)->method()->is_native()) {
1637
_result = JVMTI_ERROR_OPAQUE_FRAME;
1638
return;
1639
}
1640
1641
assert(vf->frame_pointer() != NULL, "frame pointer mustn't be NULL");
1642
if (java_thread->is_exiting() || java_thread->threadObj() == NULL) {
1643
return; /* JVMTI_ERROR_THREAD_NOT_ALIVE (default) */
1644
}
1645
int frame_number = _state->count_frames() - _depth;
1646
_state->env_thread_state((JvmtiEnvBase*)_env)->set_frame_pop(frame_number);
1647
_result = JVMTI_ERROR_NONE;
1648
}
1649
1650
void
1651
GetOwnedMonitorInfoClosure::do_thread(Thread *target) {
1652
JavaThread *jt = target->as_Java_thread();
1653
if (!jt->is_exiting() && (jt->threadObj() != NULL)) {
1654
_result = ((JvmtiEnvBase *)_env)->get_owned_monitors(_calling_thread,
1655
jt,
1656
_owned_monitors_list);
1657
}
1658
}
1659
1660
void
1661
GetCurrentContendedMonitorClosure::do_thread(Thread *target) {
1662
JavaThread *jt = target->as_Java_thread();
1663
if (!jt->is_exiting() && (jt->threadObj() != NULL)) {
1664
_result = ((JvmtiEnvBase *)_env)->get_current_contended_monitor(_calling_thread,
1665
jt,
1666
_owned_monitor_ptr);
1667
}
1668
}
1669
1670
void
1671
GetStackTraceClosure::do_thread(Thread *target) {
1672
JavaThread *jt = target->as_Java_thread();
1673
if (!jt->is_exiting() && jt->threadObj() != NULL) {
1674
_result = ((JvmtiEnvBase *)_env)->get_stack_trace(jt,
1675
_start_depth, _max_count,
1676
_frame_buffer, _count_ptr);
1677
}
1678
}
1679
1680
void
1681
GetFrameCountClosure::do_thread(Thread *target) {
1682
JavaThread* jt = _state->get_thread();
1683
assert(target == jt, "just checking");
1684
if (!jt->is_exiting() && jt->threadObj() != NULL) {
1685
_result = ((JvmtiEnvBase*)_env)->get_frame_count(_state, _count_ptr);
1686
}
1687
}
1688
1689
void
1690
GetFrameLocationClosure::do_thread(Thread *target) {
1691
JavaThread *jt = target->as_Java_thread();
1692
if (!jt->is_exiting() && jt->threadObj() != NULL) {
1693
_result = ((JvmtiEnvBase*)_env)->get_frame_location(jt, _depth,
1694
_method_ptr, _location_ptr);
1695
}
1696
}
1697
1698