GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/services/mallocTracker.hpp
/*
 * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_SERVICES_MALLOCTRACKER_HPP
#define SHARE_SERVICES_MALLOCTRACKER_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/threadCritical.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"

/*
 * This counter class counts memory allocations and deallocations, and
 * records the total allocation size and the number of allocations.
 * The counters are updated atomically.
 */
class MemoryCounter {
 private:
  volatile size_t _count;
  volatile size_t _size;

  DEBUG_ONLY(volatile size_t _peak_count;)
  DEBUG_ONLY(volatile size_t _peak_size; )

 public:
  MemoryCounter() : _count(0), _size(0) {
    DEBUG_ONLY(_peak_count = 0;)
    DEBUG_ONLY(_peak_size  = 0;)
  }

  inline void allocate(size_t sz) {
    size_t cnt = Atomic::add(&_count, size_t(1), memory_order_relaxed);
    if (sz > 0) {
      size_t sum = Atomic::add(&_size, sz, memory_order_relaxed);
      DEBUG_ONLY(update_peak_size(sum);)
    }
    DEBUG_ONLY(update_peak_count(cnt);)
  }

  inline void deallocate(size_t sz) {
    assert(count() > 0, "Nothing allocated yet");
    assert(size() >= sz, "deallocation > allocated");
    Atomic::dec(&_count, memory_order_relaxed);
    if (sz > 0) {
      Atomic::sub(&_size, sz, memory_order_relaxed);
    }
  }

  inline void resize(ssize_t sz) {
    if (sz != 0) {
      assert(sz >= 0 || size() >= size_t(-sz), "Must be");
      size_t sum = Atomic::add(&_size, size_t(sz), memory_order_relaxed);
      DEBUG_ONLY(update_peak_size(sum);)
    }
  }

  inline size_t count() const { return Atomic::load(&_count); }
  inline size_t size()  const { return Atomic::load(&_size);  }

#ifdef ASSERT
  void update_peak_count(size_t cnt);
  void update_peak_size(size_t sz);
  size_t peak_count() const;
  size_t peak_size() const;
#endif // ASSERT
};
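
// Usage sketch (illustrative only; assumes nothing beyond the API above):
//
//   MemoryCounter c;
//   c.allocate(64);    // count() == 1, size() == 64
//   c.allocate(32);    // count() == 2, size() == 96
//   c.resize(-16);     // size() == 80; count() is unchanged
//   c.deallocate(48);  // count() == 1, size() == 32
//
// In debug builds, peak_count() and peak_size() would report 2 and 96 here.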

/*
 * Malloc memory used by a particular subsystem.
 * It includes the memory acquired through os::malloc()
 * calls and the arenas' backing memory.
 */
class MallocMemory {
 private:
  MemoryCounter _malloc;
  MemoryCounter _arena;

 public:
  MallocMemory() { }

  inline void record_malloc(size_t sz) {
    _malloc.allocate(sz);
  }

  inline void record_free(size_t sz) {
    _malloc.deallocate(sz);
  }

  inline void record_new_arena() {
    _arena.allocate(0);
  }

  inline void record_arena_free() {
    _arena.deallocate(0);
  }

  inline void record_arena_size_change(ssize_t sz) {
    _arena.resize(sz);
  }

  inline size_t malloc_size()  const { return _malloc.size();  }
  inline size_t malloc_count() const { return _malloc.count(); }
  inline size_t arena_size()   const { return _arena.size();   }
  inline size_t arena_count()  const { return _arena.count();  }

  DEBUG_ONLY(inline const MemoryCounter& malloc_counter() const { return _malloc; })
  DEBUG_ONLY(inline const MemoryCounter& arena_counter()  const { return _arena;  })
};
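
// Usage sketch (illustrative only): a single MallocMemory instance aggregates
// both the raw os::malloc() memory and the arena memory of one subsystem.
// Arena creation is recorded as a zero-sized allocation; growth and shrinkage
// arrive later through record_arena_size_change().
//
//   MallocMemory mem;
//   mem.record_malloc(128);            // malloc_count() == 1, malloc_size() == 128
//   mem.record_new_arena();            // arena_count() == 1, arena_size() == 0
//   mem.record_arena_size_change(64);  // arena_size() == 64
//   mem.record_free(128);              // malloc_count() == 0, malloc_size() == 0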

class MallocMemorySummary;

// A snapshot of malloc'd memory, including malloc memory
// usage by type and the memory used by the tracking itself.
class MallocMemorySnapshot : public ResourceObj {
  friend class MallocMemorySummary;

 private:
  MallocMemory  _malloc[mt_number_of_types];
  MemoryCounter _tracking_header;

 public:
  inline MallocMemory* by_type(MEMFLAGS flags) {
    int index = NMTUtil::flag_to_index(flags);
    return &_malloc[index];
  }

  inline MemoryCounter* malloc_overhead() {
    return &_tracking_header;
  }

  // Total malloc'd memory amount
  size_t total() const;
  // Total malloc'd memory used by arenas
  size_t total_arena() const;

  inline size_t thread_count() const {
    MallocMemorySnapshot* s = const_cast<MallocMemorySnapshot*>(this);
    return s->by_type(mtThreadStack)->malloc_count();
  }

  void copy_to(MallocMemorySnapshot* s) {
    // Need to make sure that mtChunks don't get deallocated while the
    // copy is going on, because their size is adjusted using this
    // buffer in make_adjustment().
    ThreadCritical tc;
    s->_tracking_header = _tracking_header;
    for (int index = 0; index < mt_number_of_types; index++) {
      s->_malloc[index] = _malloc[index];
    }
  }

  // Make adjustment by subtracting chunks used by arenas
  // from total chunks to get total free chunk size
  void make_adjustment();
};
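
// Usage sketch (illustrative only): reading per-type usage out of a snapshot.
// mtThreadStack is one of the MEMFLAGS categories; by_type() maps a flag to
// its slot in the _malloc array. The snapshot here is assumed to come from
// MallocMemorySummary::as_snapshot(), declared below.
//
//   MallocMemorySnapshot* s = MallocMemorySummary::as_snapshot();
//   MallocMemory* tm = s->by_type(mtThreadStack);
//   size_t stack_bytes = tm->malloc_size() + tm->arena_size();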

/*
 * This class collects malloc statistics at the summary level.
 */
class MallocMemorySummary : AllStatic {
 private:
  // Reserve memory for placement of the MallocMemorySnapshot object
  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];

 public:
  static void initialize();

  static inline void record_malloc(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_malloc(size);
  }

  static inline void record_free(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_free(size);
  }

  static inline void record_new_arena(MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_new_arena();
  }

  static inline void record_arena_free(MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_arena_free();
  }

  static inline void record_arena_size_change(ssize_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_arena_size_change(size);
  }

  static void snapshot(MallocMemorySnapshot* s) {
    as_snapshot()->copy_to(s);
    s->make_adjustment();
  }

  // Record memory used by the malloc tracking header
  static inline void record_new_malloc_header(size_t sz) {
    as_snapshot()->malloc_overhead()->allocate(sz);
  }

  static inline void record_free_malloc_header(size_t sz) {
    as_snapshot()->malloc_overhead()->deallocate(sz);
  }

  // The memory used by malloc tracking headers
  static inline size_t tracking_overhead() {
    return as_snapshot()->malloc_overhead()->size();
  }

  static MallocMemorySnapshot* as_snapshot() {
    return (MallocMemorySnapshot*)_snapshot;
  }
};
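
// Usage sketch (illustrative only): how a reporter might take a consistent
// copy of the summary counters. The copy is made under ThreadCritical (see
// MallocMemorySnapshot::copy_to()), then adjusted for free arena chunks.
//
//   MallocMemorySnapshot* copy = new MallocMemorySnapshot();  // ResourceObj;
//                                                             // assumes an active ResourceMark
//   MallocMemorySummary::snapshot(copy);
//   size_t all_bytes   = copy->total();        // all malloc'd memory
//   size_t arena_bytes = copy->total_arena();  // portion backing arenas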


/*
 * Malloc tracking header.
 * To satisfy the malloc alignment requirement, NMT uses 2 machine words for tracking
 * purposes, which ensures 8-byte alignment on 32-bit systems and 16-byte alignment on
 * 64-bit systems (product build).
 */

class MallocHeader {
#ifdef _LP64
  size_t _size      : 64;
  size_t _flags     : 8;
  size_t _pos_idx   : 16;
  size_t _bucket_idx: 40;
#define MAX_MALLOCSITE_TABLE_SIZE right_n_bits(40)
#define MAX_BUCKET_LENGTH         right_n_bits(16)
#else
  size_t _size      : 32;
  size_t _flags     : 8;
  size_t _pos_idx   : 8;
  size_t _bucket_idx: 16;
#define MAX_MALLOCSITE_TABLE_SIZE right_n_bits(16)
#define MAX_BUCKET_LENGTH         right_n_bits(8)
#endif // _LP64

 public:
  MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack, NMT_TrackingLevel level) {
    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
           "Wrong header size");

    if (level == NMT_minimal) {
      return;
    }

    _flags = NMTUtil::flag_to_index(flags);
    set_size(size);
    if (level == NMT_detail) {
      size_t bucket_idx;
      size_t pos_idx;
      if (record_malloc_site(stack, size, &bucket_idx, &pos_idx, flags)) {
        assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
        assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
        _bucket_idx = bucket_idx;
        _pos_idx = pos_idx;
      }
    }

    MallocMemorySummary::record_malloc(size, flags);
    MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
  }

  inline size_t size() const { return _size; }
  inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
  bool get_stack(NativeCallStack& stack) const;

  // Cleanup tracking information before the memory is released.
  void release() const;

 private:
  inline void set_size(size_t size) {
    _size = size;
  }
  bool record_malloc_site(const NativeCallStack& stack, size_t size,
                          size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) const;
};
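
// Layout sketch (illustrative only): the tracking header occupies the two
// machine words immediately before the address returned to the caller, inside
// the same underlying malloc'd chunk:
//
//   +------------------------+------------------------------------+
//   | MallocHeader (2 words) | user data                          |
//   +------------------------+------------------------------------+
//   ^ malloc_base            ^ memblock
//                              = (char*)malloc_base + sizeof(MallocHeader)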


// Main class called from MemTracker to track malloc activities
class MallocTracker : AllStatic {
 public:
  // Initialize the malloc tracker for the specified tracking level
  static bool initialize(NMT_TrackingLevel level);

  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);

  // Malloc tracking header size for the specified tracking level
  static inline size_t malloc_header_size(NMT_TrackingLevel level) {
    return (level == NMT_off) ? 0 : sizeof(MallocHeader);
  }

  // Parameter name convention:
  // memblock   : the beginning address of the user data
  // malloc_base: the beginning address that includes the malloc tracking header
  //
  // The relationship:
  // memblock = (char*)malloc_base + sizeof(nmt header)

  // Record malloc on the specified memory block
  static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
                             const NativeCallStack& stack, NMT_TrackingLevel level);

  // Record free on the specified memory block
  static void* record_free(void* memblock);

  // Offset a memory address to its header address
  static inline void* get_base(void* memblock);
  static inline void* get_base(void* memblock, NMT_TrackingLevel level) {
    if (memblock == NULL || level == NMT_off) return memblock;
    return (char*)memblock - malloc_header_size(level);
  }

  // Get memory size
  static inline size_t get_size(void* memblock) {
    MallocHeader* header = malloc_header(memblock);
    return header->size();
  }

  // Get memory type
  static inline MEMFLAGS get_flags(void* memblock) {
    MallocHeader* header = malloc_header(memblock);
    return header->flags();
  }

  // Get header size
  static inline size_t get_header_size(void* memblock) {
    return (memblock == NULL) ? 0 : sizeof(MallocHeader);
  }

  static inline void record_new_arena(MEMFLAGS flags) {
    MallocMemorySummary::record_new_arena(flags);
  }

  static inline void record_arena_free(MEMFLAGS flags) {
    MallocMemorySummary::record_arena_free(flags);
  }

  static inline void record_arena_size_change(ssize_t size, MEMFLAGS flags) {
    MallocMemorySummary::record_arena_size_change(size, flags);
  }

 private:
  static inline MallocHeader* malloc_header(void* memblock) {
    assert(memblock != NULL, "NULL pointer");
    MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader));
    return header;
  }
};
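
// Usage sketch (illustrative only): the round trip of a tracked allocation.
// In practice the caller is os::malloc()/os::free(); here it is assumed the
// caller over-allocates by the header size and passes the raw block in as
// malloc_base. raw_allocate()/raw_free() are hypothetical stand-ins for the
// underlying allocator.
//
//   size_t overhead = MallocTracker::malloc_header_size(level);
//   void*  base     = raw_allocate(size + overhead);         // e.g. ::malloc()
//   void*  memblock = MallocTracker::record_malloc(base, size, mtInternal,
//                                                  NativeCallStack(), level);
//   ...
//   void*  to_free  = MallocTracker::record_free(memblock);  // back to malloc_base
//   raw_free(to_free);                                       // e.g. ::free()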

#endif // INCLUDE_NMT


#endif // SHARE_SERVICES_MALLOCTRACKER_HPP