GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/services/mallocSiteTable.hpp

/*
 * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_SERVICES_MALLOCSITETABLE_HPP
#define SHARE_SERVICES_MALLOCSITETABLE_HPP

#include "utilities/macros.hpp"

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "services/allocationSite.hpp"
#include "services/mallocTracker.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"

// MallocSite represents a code path that eventually calls
// os::malloc() to allocate memory.
class MallocSite : public AllocationSite {
  MemoryCounter _c;
 public:
  MallocSite(const NativeCallStack& stack, MEMFLAGS flags) :
    AllocationSite(stack, flags) {}

  void allocate(size_t size)   { _c.allocate(size);   }
  void deallocate(size_t size) { _c.deallocate(size); }

  // Memory allocated from this code path
  size_t size()  const { return _c.size(); }
  // The number of calls that were made
  size_t count() const { return _c.count(); }
};
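
// --- Editor's note: illustrative example, not part of the original header ---
// A minimal sketch of how code holding a MallocSite might account one
// allocation/deallocation pair; the site pointer and the 64-byte size are
// hypothetical.
//
//   MallocSite* site = ...;   // obtained via a MallocSiteTable lookup
//   site->allocate(64);       // record a 64-byte malloc from this call path
//   // site->size() now includes these 64 bytes; site->count() was bumped
//   site->deallocate(64);     // record the matching free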

// Malloc site hashtable entry
class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
 private:
  MallocSite                         _malloc_site;
  const unsigned int                 _hash;
  MallocSiteHashtableEntry* volatile _next;

 public:

  MallocSiteHashtableEntry(NativeCallStack stack, MEMFLAGS flags):
    _malloc_site(stack, flags), _hash(stack.calculate_hash()), _next(NULL) {
    assert(flags != mtNone, "Expect a real memory type");
  }

  inline const MallocSiteHashtableEntry* next() const {
    return _next;
  }

  // Insert an entry atomically.
  // Return true if the entry is inserted successfully.
  // The operation can fail due to contention from another thread.
  bool atomic_insert(MallocSiteHashtableEntry* entry);

  unsigned int hash() const { return _hash; }

  inline const MallocSite* peek() const { return &_malloc_site; }
  inline MallocSite* data()             { return &_malloc_site; }

  // Allocation/deallocation on this allocation site
  inline void allocate(size_t size)   { _malloc_site.allocate(size); }
  inline void deallocate(size_t size) { _malloc_site.deallocate(size); }
  // Memory counters
  inline size_t size() const  { return _malloc_site.size(); }
  inline size_t count() const { return _malloc_site.count(); }
};
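
// --- Editor's note: illustrative sketch, not part of the original header ---
// atomic_insert() is only declared above; its definition lives in the
// companion .cpp file. One plausible shape, assuming an
// Atomic::cmpxchg(dest, compare_value, exchange_value) overload that returns
// the previous value: link the new entry only if this entry's _next is still
// NULL, so a losing thread sees false and can retry further down the chain.
//
//   bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) {
//     return Atomic::cmpxchg(&_next, (MallocSiteHashtableEntry*)NULL, entry) == NULL;
//   }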

// The walker walks every entry on MallocSiteTable
class MallocSiteWalker : public StackObj {
 public:
  virtual bool do_malloc_site(const MallocSite* e) { return false; }
};
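
// --- Editor's note: illustrative example, not part of the original header ---
// A hypothetical concrete walker that sums outstanding malloc'd bytes across
// all recorded sites, assuming a true return value means "keep walking".
// It would be handed to MallocSiteTable::walk_malloc_site() (declared below).
//
//   class TotalSizeWalker : public MallocSiteWalker {
//    public:
//     size_t _total;
//     TotalSizeWalker() : _total(0) { }
//     virtual bool do_malloc_site(const MallocSite* e) {
//       _total += e->size();   // bytes currently attributed to this call path
//       return true;           // continue visiting the remaining entries
//     }
//   };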

/*
 * Native memory tracking call site table.
 * The table is only needed when detail tracking is enabled.
 */
class MallocSiteTable : AllStatic {
 private:
  // The number of hash buckets in this hashtable. The number should
  // be tuned if malloc activity changes significantly.
  // The statistics can be obtained via jcmd:
  //   jcmd <pid> VM.native_memory statistics

  // Currently, the (number of buckets / number of entries) ratio is
  // about 1 : 6
  enum {
    table_base_size = 128,   // The base size is calculated from statistics to give
                             // a table ratio of around 1:6
    table_size = (table_base_size * NMT_TrackingStackDepth - 1)
  };


  // This is a very special lock that allows multiple shared accesses
  // (sharedLock), but once exclusive access (exclusiveLock) is requested,
  // all shared accesses are rejected forever.
  class AccessLock : public StackObj {
    enum LockState {
      NoLock,
      SharedLock,
      ExclusiveLock
    };

   private:
    // A very large negative number. The only possibility to "overflow"
    // this number is when there are more than -min_jint threads in
    // this process, which is not going to happen in the foreseeable future.
    const static int _MAGIC_ = min_jint;

    LockState     _lock_state;
    volatile int* _lock;
   public:
    AccessLock(volatile int* lock) :
      _lock_state(NoLock), _lock(lock) {
    }

    ~AccessLock() {
      if (_lock_state == SharedLock) {
        Atomic::dec(_lock);
      }
    }
    // Acquire shared lock.
    // Return true if shared access is granted.
    inline bool sharedLock() {
      jint res = Atomic::add(_lock, 1);
      if (res < 0) {
        Atomic::dec(_lock);
        return false;
      }
      _lock_state = SharedLock;
      return true;
    }
    // Acquire exclusive lock
    void exclusiveLock();
  };
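
  // --- Editor's note: illustrative sketch, not part of the original header ---
  // exclusiveLock() is defined in the companion .cpp file. A simplified sketch
  // of the idea the comments above imply: push the counter deeply negative by
  // adding _MAGIC_, so every later sharedLock() sees res < 0 and backs off,
  // then spin until the remaining shared holders drain. The busy-wait below is
  // an assumption, not the actual HotSpot code.
  //
  //   void MallocSiteTable::AccessLock::exclusiveLock() {
  //     assert(_lock_state == NoLock, "Can not upgrade shared lock");
  //     Atomic::add(_lock, _MAGIC_);    // reject all future shared accesses
  //     while (*_lock != _MAGIC_) { }   // wait until shared holders exit
  //     _lock_state = ExclusiveLock;
  //   }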

 public:
  static bool initialize();
  static void shutdown();

  NOT_PRODUCT(static int access_peak_count() { return _peak_count; })

  // Number of hash buckets
  static inline int hash_buckets() { return (int)table_size; }

  // Access and copy a call stack from this table. The shared lock should be
  // acquired before accessing the entry.
  static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
                                  size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        stack = *site->call_stack();
        return true;
      }
    }
    return false;
  }

  // Record a new allocation from the specified call path.
  // Return true if the allocation is recorded successfully; bucket_idx
  // and pos_idx are also updated to indicate the entry where the allocation
  // information was recorded.
  // Returning false occurs only under rare scenarios:
  //   1. out of memory
  //   2. hash bucket overflow
  static inline bool allocation_at(const NativeCallStack& stack, size_t size,
      size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx, flags);
      if (site != NULL) site->allocate(size);
      return site != NULL;
    }
    return false;
  }

  // Record memory deallocation. bucket_idx and pos_idx indicate where the allocation
  // information was recorded.
  static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        site->deallocate(size);
        return true;
      }
    }
    return false;
  }
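
  // --- Editor's note: illustrative example, not part of the original header ---
  // A hypothetical pair of hooks showing how a tracker might drive this API:
  // record the allocation once, remember (bucket_idx, pos_idx), and replay
  // them on free. my_tracked_malloc/my_tracked_free and the bookkeeping that
  // maps a pointer back to its indices are assumptions for the example only.
  //
  //   void* my_tracked_malloc(size_t size, const NativeCallStack& stack) {
  //     size_t bucket_idx, pos_idx;
  //     void* p = ::malloc(size);
  //     if (p != NULL &&
  //         MallocSiteTable::allocation_at(stack, size, &bucket_idx, &pos_idx, mtNMT)) {
  //       // stash (p -> size, bucket_idx, pos_idx) in a side table
  //     }
  //     return p;
  //   }
  //
  //   void my_tracked_free(void* p, size_t size, size_t bucket_idx, size_t pos_idx) {
  //     MallocSiteTable::deallocation_at(size, bucket_idx, pos_idx);
  //     ::free(p);
  //   }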

  // Walk this table.
  static bool walk_malloc_site(MallocSiteWalker* walker);

  static void print_tuning_statistics(outputStream* st);

 private:
  static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key, MEMFLAGS flags);
  static void reset();

  // Delete a bucket's linked list
  static void delete_linked_list(MallocSiteHashtableEntry* head);

  static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags);
  static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
  static bool walk(MallocSiteWalker* walker);

  static inline unsigned int hash_to_index(unsigned int hash) {
    return (hash % table_size);
  }
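
  // --- Editor's note: illustrative sketch, not part of the original header ---
  // lookup_or_add() is defined in the companion .cpp file. A plausible shape
  // of the chained-hashing logic the declarations above imply; the equals()
  // check on the call stack and the empty-bucket handling are assumptions.
  //
  //   MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key,
  //       size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) {
  //     unsigned int hash = key.calculate_hash();
  //     *bucket_idx = (size_t)hash_to_index(hash);
  //     *pos_idx = 0;
  //     MallocSiteHashtableEntry* head = _table[*bucket_idx];
  //     while (head != NULL) {
  //       if (head->hash() == hash && head->peek()->equals(key)) {
  //         return head->data();               // existing site for this stack
  //       }
  //       if (head->next() == NULL) {          // reached the tail: append
  //         MallocSiteHashtableEntry* entry = new_entry(key, flags);
  //         if (entry == NULL) return NULL;    // out of memory
  //         if (head->atomic_insert(entry)) {
  //           (*pos_idx)++;
  //           return entry->data();
  //         }
  //         // lost the race; a real implementation would reuse or free 'entry'
  //       }
  //       head = (MallocSiteHashtableEntry*)head->next();
  //       (*pos_idx)++;
  //     }
  //     // empty bucket: creation of the first entry omitted for brevity
  //     return NULL;
  //   }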

  static inline const NativeCallStack* hash_entry_allocation_stack() {
    assert(_hash_entry_allocation_stack != NULL, "Must be set");
    return _hash_entry_allocation_stack;
  }

  static inline const MallocSiteHashtableEntry* hash_entry_allocation_site() {
    assert(_hash_entry_allocation_site != NULL, "Must be set");
    return _hash_entry_allocation_site;
  }

 private:
  // Counter for counting concurrent access
  static volatile int _access_count;

  // The callsite hashtable. It has to be a static table,
  // since malloc calls can come from the C runtime linker.
  static MallocSiteHashtableEntry*        _table[table_size];
  static const NativeCallStack*           _hash_entry_allocation_stack;
  static const MallocSiteHashtableEntry*  _hash_entry_allocation_site;


  NOT_PRODUCT(static int _peak_count;)
};

#endif // INCLUDE_NMT
#endif // SHARE_SERVICES_MALLOCSITETABLE_HPP