Path: blob/master/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp
41155 views
/*
 * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/leakprofiler/sampling/objectSample.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/leakprofiler/sampling/sampleList.hpp"
#include "jfr/leakprofiler/sampling/samplePriorityQueue.hpp"
#include "jfr/recorder/jfrEventSetting.inline.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "jfr/utilities/jfrTryLock.hpp"
#include "logging/log.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"

// Timestamp of when the gc last processed the set of sampled objects.
// Atomic access to prevent word tearing on 32-bit platforms.
static volatile int64_t _last_sweep;

// Condition variable to communicate that some sampled objects have been cleared by the gc
// and can therefore be removed from the sample priority queue.
static bool volatile _dead_samples = false;

// The OopStorage instance is used to hold weak references to sampled objects.
// It is constructed and registered during VM initialization. This is a singleton
// that persists independent of the state of the ObjectSampler.
static OopStorage* _oop_storage = NULL;

OopStorage* ObjectSampler::oop_storage() { return _oop_storage; }

// Callback invoked by the GC after an iteration over the oop storage
// that may have cleared dead referents. num_dead is the number of entries
// already NULL or cleared by the iteration. When any referents died, flag
// the samples as needing a scavenge and refresh the last-sweep timestamp.
void ObjectSampler::oop_storage_gc_notification(size_t num_dead) {
  if (num_dead != 0) {
    // The ObjectSampler instance may have already been cleaned or a new
    // instance was created concurrently. This allows for a small race where cleaning
    // could be done again.
    Atomic::store(&_dead_samples, true);
    Atomic::store(&_last_sweep, (int64_t)JfrTicks::now().value());
  }
}

// Creates the weak OopStorage for sampled objects and registers the GC
// notification callback above. Asserts (debug builds) that creation succeeded.
bool ObjectSampler::create_oop_storage() {
  _oop_storage = OopStorageSet::create_weak("Weak JFR Old Object Samples", mtTracing);
  assert(_oop_storage != NULL, "invariant");
  _oop_storage->register_num_dead_callback(&oop_storage_gc_notification);
  return true;
}

// Singleton instance; created/destroyed only at safepoints (see create()/destroy()).
static ObjectSampler* _instance = NULL;

static ObjectSampler& instance() {
  assert(_instance != NULL, "invariant");
  return *_instance;
}

// size bounds both the sample list and the priority queue capacity.
ObjectSampler::ObjectSampler(size_t size) :
  _priority_queue(new SamplePriorityQueue(size)),
  _list(new SampleList(size)),
  _total_allocated(0),
  _threshold(0),
  _size(size) {
  // Reset sweep state so a stale notification from a previous sampler
  // instance does not trigger an immediate scavenge.
  Atomic::store(&_dead_samples, false);
  Atomic::store(&_last_sweep, (int64_t)JfrTicks::now().value());
}

ObjectSampler::~ObjectSampler() {
  delete _priority_queue;
  _priority_queue = NULL;
  delete _list;
  _list = NULL;
}

// Must be called at a safepoint, after create_oop_storage() has run.
bool ObjectSampler::create(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(_oop_storage != NULL, "should be already created");
  assert(_instance == NULL, "invariant");
  _instance = new ObjectSampler(size);
  return _instance != NULL;
}

bool ObjectSampler::is_created() {
  return _instance != NULL;
}

ObjectSampler* ObjectSampler::sampler() {
  assert(is_created(), "invariant");
  return _instance;
}

// Must be called at a safepoint. Clears _instance before deleting so
// concurrent observers of is_created() do not see a dangling pointer.
void ObjectSampler::destroy() {
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  if (_instance != NULL) {
    ObjectSampler* const sampler = _instance;
    _instance = NULL;
    delete sampler;
  }
}

// Simple spin lock word guarding the sampler state: 0 = unlocked, 1 = locked.
// Also handed to JfrTryLock in sample() for a non-blocking acquire attempt.
static volatile int _lock = 0;

// Blocking acquire: spins until the CAS from 0 to 1 succeeds.
ObjectSampler* ObjectSampler::acquire() {
  while (Atomic::cmpxchg(&_lock, 0, 1) == 1) {}
  return _instance;
}

// Full fence before clearing the lock word so all writes made inside the
// critical section are visible before the lock is observed as free.
void ObjectSampler::release() {
  OrderAccess::fence();
  _lock = 0;
}

// Returns the JFR trace id for thread, or 0 if the thread should not be
// sampled (no java.lang.Thread object yet, or thread is JFR-excluded).
// Ensures the thread's checkpoint blob exists as a side effect.
static traceid get_thread_id(JavaThread* thread) {
  assert(thread != NULL, "invariant");
  if (thread->threadObj() == NULL) {
    return 0;
  }
  const JfrThreadLocal* const tl = thread->jfr_thread_local();
  assert(tl != NULL, "invariant");
  if (tl->is_excluded()) {
    return 0;
  }
  if (!tl->has_thread_blob()) {
    JfrCheckpointManager::create_thread_blob(thread);
  }
  assert(tl->has_thread_blob(), "invariant");
  return tl->thread_id();
}

// RAII helper: records a stack trace for the leak profiler on construction
// (only if the OldObjectSample event is configured with stacktrace enabled)
// and clears the thread-local cached trace on destruction.
class RecordStackTrace {
 private:
  JavaThread* _jt;
  bool _enabled;
 public:
  RecordStackTrace(JavaThread* jt) : _jt(jt),
    _enabled(JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
    if (_enabled) {
      JfrStackTraceRepository::record_for_leak_profiler(jt);
    }
  }
  ~RecordStackTrace() {
    if (_enabled) {
      _jt->jfr_thread_local()->clear_cached_stack_trace();
    }
  }
};

// Entry point for sampling an allocation of 'allocated' bytes at 'obj'.
// Best-effort: bails out for non-sampleable threads and, rather than
// blocking the allocating thread, skips the sample on lock contention.
void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) {
  assert(thread != NULL, "invariant");
  assert(is_created(), "invariant");
  const traceid thread_id = get_thread_id(thread);
  if (thread_id == 0) {
    return;
  }
  RecordStackTrace rst(thread);
  // try enter critical section
  JfrTryLock tryLock(&_lock);
  if (!tryLock.acquired()) {
    log_trace(jfr, oldobject, sampling)("Skipping old object sample due to lock contention");
    return;
  }
  instance().add(obj, allocated, thread_id, thread);
}

// Adds a sample for obj. Caller must hold _lock (see sample()).
void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JavaThread* thread) {
  assert(obj != NULL, "invariant");
  assert(thread_id != 0, "invariant");
  assert(thread != NULL, "invariant");
  assert(thread->jfr_thread_local()->has_thread_blob(), "invariant");

  // Prune samples whose referents the GC has cleared before inserting.
  if (Atomic::load(&_dead_samples)) {
    // There's a small race where a GC scan might reset this to true, potentially
    // causing a back-to-back scavenge.
    Atomic::store(&_dead_samples, false);
    scavenge();
  }

  _total_allocated += allocated;
  // span = bytes allocated since the samples currently in the queue were taken;
  // used to prioritize which sample to evict when the queue is full.
  const size_t span = _total_allocated - _priority_queue->total();
  ObjectSample* sample;
  if ((size_t)_priority_queue->count() == _size) {
    assert(_list->count() == _size, "invariant");
    const ObjectSample* peek = _priority_queue->peek();
    if (peek->span() > span) {
      // quick reject, will not fit
      return;
    }
    // Evict the lowest-priority sample and reuse its slot.
    sample = _list->reuse(_priority_queue->pop());
  } else {
    sample = _list->get();
  }

  assert(sample != NULL, "invariant");
  sample->set_thread_id(thread_id);

  const JfrThreadLocal* const tl = thread->jfr_thread_local();
  sample->set_thread(tl->thread_blob());

  // A hash of 0 means no stack trace was cached by RecordStackTrace.
  const unsigned int stacktrace_hash = tl->cached_stack_trace_hash();
  if (stacktrace_hash != 0) {
    sample->set_stack_trace_id(tl->cached_stack_trace_id());
    sample->set_stack_trace_hash(stacktrace_hash);
  }

  sample->set_span(allocated);
  sample->set_object(cast_to_oop(obj));
  sample->set_allocated(allocated);
  sample->set_allocation_time(JfrTicks::now());
  sample->set_heap_used_at_last_gc(Universe::heap()->used_at_last_gc());
  _priority_queue->push(sample);
}

// Walks the sample list and removes every sample whose referent is dead.
// The next pointer is read before removal since remove_dead() releases the node.
void ObjectSampler::scavenge() {
  ObjectSample* current = _list->last();
  while (current != NULL) {
    ObjectSample* next = current->next();
    if (current->is_dead()) {
      remove_dead(current);
    }
    current = next;
  }
}

// Removes a dead sample from both the priority queue and the list. The dead
// sample's span is folded into its predecessor so total span accounting in
// the queue is preserved.
void ObjectSampler::remove_dead(ObjectSample* sample) {
  assert(sample != NULL, "invariant");
  assert(sample->is_dead(), "invariant");
  sample->release();

  ObjectSample* const previous = sample->prev();
  // push span onto previous
  if (previous != NULL) {
    // Remove and re-push so the queue re-orders after the span change.
    _priority_queue->remove(previous);
    previous->add_span(sample->span());
    _priority_queue->push(previous);
  }
  _priority_queue->remove(sample);
  _list->release(sample);
}

ObjectSample* ObjectSampler::last() const {
  return _list->last();
}

const ObjectSample* ObjectSampler::first() const {
  return _list->first();
}

const ObjectSample* ObjectSampler::last_resolved() const {
  return _list->last_resolved();
}

void ObjectSampler::set_last_resolved(const ObjectSample* sample) {
  _list->set_last_resolved(sample);
}

int ObjectSampler::item_count() const {
  return _priority_queue->count();
}

const ObjectSample* ObjectSampler::item_at(int index) const {
  return _priority_queue->item_at(index);
}

// Non-const overload delegates to the const version.
ObjectSample* ObjectSampler::item_at(int index) {
  return const_cast<ObjectSample*>(
    const_cast<const ObjectSampler*>(this)->item_at(index)
  );
}

int64_t ObjectSampler::last_sweep() {
  return Atomic::load(&_last_sweep);
}