Path: src/hotspot/share/services/mallocSiteTable.hpp
/*
 * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_SERVICES_MALLOCSITETABLE_HPP
#define SHARE_SERVICES_MALLOCSITETABLE_HPP

#include "utilities/macros.hpp"

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "services/allocationSite.hpp"
#include "services/mallocTracker.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"

// MallocSite represents a code path that eventually calls
// os::malloc() to allocate memory
class MallocSite : public AllocationSite {
  MemoryCounter _c;
 public:
  MallocSite(const NativeCallStack& stack, MEMFLAGS flags) :
    AllocationSite(stack, flags) {}

  void allocate(size_t size)   { _c.allocate(size);   }
  void deallocate(size_t size) { _c.deallocate(size); }

  // Memory allocated from this code path
  size_t size()  const { return _c.size(); }
  // The number of calls that were made
  size_t count() const { return _c.count(); }
};

// Malloc site hashtable entry
class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
 private:
  MallocSite                         _malloc_site;
  const unsigned int                 _hash;
  MallocSiteHashtableEntry* volatile _next;

 public:

  MallocSiteHashtableEntry(NativeCallStack stack, MEMFLAGS flags):
    _malloc_site(stack, flags), _hash(stack.calculate_hash()), _next(NULL) {
    assert(flags != mtNone, "Expect a real memory type");
  }

  inline const MallocSiteHashtableEntry* next() const {
    return _next;
  }

  // Insert an entry atomically.
  // Return true if the entry is inserted successfully.
  // The operation can fail due to contention from other threads.
  bool atomic_insert(MallocSiteHashtableEntry* entry);

  unsigned int hash() const { return _hash; }

  inline const MallocSite* peek() const { return &_malloc_site; }
  inline MallocSite* data()             { return &_malloc_site; }

  // Allocation/deallocation on this allocation site
  inline void allocate(size_t size)   { _malloc_site.allocate(size);   }
  inline void deallocate(size_t size) { _malloc_site.deallocate(size); }
  // Memory counters
  inline size_t size()  const { return _malloc_site.size();  }
  inline size_t count() const { return _malloc_site.count(); }
};

// The walker walks every entry on MallocSiteTable
class MallocSiteWalker : public StackObj {
 public:
  virtual bool do_malloc_site(const MallocSite* e) { return false; }
};

/*
 * Native memory tracking call site table.
 * The table is only needed when detail tracking is enabled.
 */
class MallocSiteTable : AllStatic {
 private:
  // The number of hash buckets in this hashtable. The number should
  // be tuned if malloc activity changes significantly.
  // The statistics data can be obtained via jcmd:
  //   jcmd <pid> VM.native_memory statistics

  // Currently, the (number of buckets / number of entries) ratio is
  // about 1 / 6
  enum {
    table_base_size = 128,   // The base size is calculated from statistics to give
                             // a table ratio of around 1:6
    table_size = (table_base_size * NMT_TrackingStackDepth - 1)
  };

  // This is a very special lock that allows multiple shared accesses (sharedLock), but
  // once exclusive access (exclusiveLock) is requested, all shared accesses are
  // rejected forever.
  class AccessLock : public StackObj {
    enum LockState {
      NoLock,
      SharedLock,
      ExclusiveLock
    };

   private:
    // A very large negative number. The only way to "overflow"
    // this number is to have more than -min_jint threads in
    // this process, which is not going to happen in the foreseeable future.
    const static int _MAGIC_ = min_jint;

    LockState     _lock_state;
    volatile int* _lock;
   public:
    AccessLock(volatile int* lock) :
      _lock_state(NoLock), _lock(lock) {
    }

    ~AccessLock() {
      if (_lock_state == SharedLock) {
        Atomic::dec(_lock);
      }
    }
    // Acquire shared lock.
    // Return true if shared access is granted.
    inline bool sharedLock() {
      jint res = Atomic::add(_lock, 1);
      if (res < 0) {
        Atomic::dec(_lock);
        return false;
      }
      _lock_state = SharedLock;
      return true;
    }
    // Acquire exclusive lock
    void exclusiveLock();
  };

 public:
  static bool initialize();
  static void shutdown();

  NOT_PRODUCT(static int access_peak_count() { return _peak_count; })

  // Number of hash buckets
  static inline int hash_buckets() { return (int)table_size; }

  // Access and copy a call stack from this table. The shared lock should be
  // acquired before accessing the entry.
  static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
    size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        stack = *site->call_stack();
        return true;
      }
    }
    return false;
  }

  // Record a new allocation from the specified call path.
  // Return true if the allocation is recorded successfully; bucket_idx
  // and pos_idx are also updated to indicate the entry where the allocation
  // information was recorded.
  // Returning false only occurs under rare scenarios:
  //   1. out of memory
  //   2. overflow of a hash bucket
  static inline bool allocation_at(const NativeCallStack& stack, size_t size,
    size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx, flags);
      if (site != NULL) site->allocate(size);
      return site != NULL;
    }
    return false;
  }
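
  // Note: a minimal, assumed usage sketch (not part of the original header). The
  // malloc tracking code that calls allocation_at() is expected to remember the
  // returned bucket_idx/pos_idx alongside the allocation (e.g. in its malloc header),
  // so that the matching deallocation_at() below can locate the same MallocSite:
  //
  //   size_t bucket_idx, pos_idx;
  //   if (MallocSiteTable::allocation_at(stack, size, &bucket_idx, &pos_idx, flags)) {
  //     // store (bucket_idx, pos_idx) with the allocation for the later deallocation
  //   }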

  // Record a memory deallocation. bucket_idx and pos_idx indicate where the
  // allocation information was recorded.
  static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        site->deallocate(size);
        return true;
      }
    }
    return false;
  }

  // Walk this table.
  static bool walk_malloc_site(MallocSiteWalker* walker);

  static void print_tuning_statistics(outputStream* st);

 private:
  static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key, MEMFLAGS flags);
  static void reset();

  // Delete a bucket linked list
  static void delete_linked_list(MallocSiteHashtableEntry* head);

  static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags);
  static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
  static bool walk(MallocSiteWalker* walker);

  static inline unsigned int hash_to_index(unsigned int hash) {
    return (hash % table_size);
  }

  static inline const NativeCallStack* hash_entry_allocation_stack() {
    assert(_hash_entry_allocation_stack != NULL, "Must be set");
    return _hash_entry_allocation_stack;
  }

  static inline const MallocSiteHashtableEntry* hash_entry_allocation_site() {
    assert(_hash_entry_allocation_site != NULL, "Must be set");
    return _hash_entry_allocation_site;
  }

 private:
  // Counter for counting concurrent access
  static volatile int _access_count;

  // The callsite hashtable. It has to be a static table,
  // since malloc calls can come from the C runtime linker.
  static MallocSiteHashtableEntry*       _table[table_size];
  static const NativeCallStack*          _hash_entry_allocation_stack;
  static const MallocSiteHashtableEntry* _hash_entry_allocation_site;

  NOT_PRODUCT(static int _peak_count;)
};

#endif // INCLUDE_NMT
#endif // SHARE_SERVICES_MALLOCSITETABLE_HPP
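
For reference, here is a minimal sketch of how the MallocSiteWalker interface declared above could be used by reporting code. The subclass name TotalSizeWalker and the calling context are assumptions for illustration only; the sketch relies solely on the public API in this header, and assumes MallocSiteTable::initialize() has already been called and that returning true from do_malloc_site() continues the walk.

// Hypothetical walker: sums the outstanding malloc'd bytes over all recorded call sites.
class TotalSizeWalker : public MallocSiteWalker {
 public:
  size_t _total;
  TotalSizeWalker() : _total(0) { }

  virtual bool do_malloc_site(const MallocSite* e) {
    _total += e->size();   // bytes currently attributed to this call site
    return true;           // true = keep walking
  }
};

// Usage, e.g. from detail-level NMT reporting code:
//   TotalSizeWalker walker;
//   if (MallocSiteTable::walk_malloc_site(&walker)) {
//     // walker._total now holds the total across all recorded call sites
//   }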