GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/gc/parallel/psOldGen.cpp
/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/align.hpp"

PSOldGen::PSOldGen(ReservedSpace rs, size_t initial_size, size_t min_size,
                   size_t max_size, const char* perf_data_name, int level):
  _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, initial_size, GenAlignment, perf_data_name, level);
}

void PSOldGen::initialize(ReservedSpace rs, size_t initial_size, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, initial_size, alignment);
  initialize_work(perf_data_name, level);

  // The old gen can grow to max_gen_size().  _reserved reflects only
  // the current maximum that can be committed.
  assert(_reserved.byte_size() <= max_gen_size(), "Consistency check");

  initialize_performance_counters(perf_data_name, level);
}

void PSOldGen::initialize_virtual_space(ReservedSpace rs,
                                        size_t initial_size,
                                        size_t alignment) {

  _virtual_space = new PSVirtualSpace(rs, alignment);
  if (!_virtual_space->expand_by(initial_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  //
  // Basic memory initialization
  //

  MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
                           heap_word_size(max_gen_size()));
  assert(limit_reserved.byte_size() == max_gen_size(),
         "word vs bytes confusion");
  //
  // Object start stuff
  //

  start_array()->initialize(limit_reserved);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately rather than
    // waiting for the initialization of the space even though
    // mangling is related to spaces.  Doing it here eliminates
    // the need to carry along information that a complete mangling
    // (bottom to end) needs to be done.
    SpaceMangler::mangle_region(cmr);
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSCardTable* ct = heap->card_table();
  ct->resize_covered_region(cmr);

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != heap->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace(virtual_space()->alignment());
  object_space()->initialize(cmr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle,
                             MutableSpace::SetupPages,
                             &ParallelScavengeHeap::heap()->workers());

  // Update the start_array
  start_array()->set_covered_region(cmr);
}

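// [Editor's note: illustrative arithmetic, not part of the upstream file;
// it assumes the conventional 512-byte card size.]  Worked example of the
// card-alignment guarantee above: a generation spanning
// [0x7f0000000000, 0x7f0010000000) is fine because both bounds are
// multiples of 512.  If the start were 0x7f0000000100 instead, the card
// covering that address would also cover the tail of the neighboring
// generation, so clearing or dirtying it would touch two generations at
// once, which is exactly what the commit/uncommit and card-clearing code
// cannot tolerate.
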
void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1, min_gen_size(),
                                           max_gen_size(), virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}

size_t PSOldGen::num_iterable_blocks() const {
  return (object_space()->used_in_bytes() + IterateBlockSize - 1) / IterateBlockSize;
}

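// [Editor's note: illustrative arithmetic, not part of the upstream file.]
// The expression above is a ceiling division.  Assuming IterateBlockSize
// is 1M (1048576 bytes): with used_in_bytes() == 2621440 (2.5M),
// (2621440 + 1048576 - 1) / 1048576 == 3, so the half-full last block
// still gets its own iteration index rather than being dropped.
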
void PSOldGen::object_iterate_block(ObjectClosure* cl, size_t block_index) {
  size_t block_word_size = IterateBlockSize / HeapWordSize;
  assert((block_word_size % (ObjectStartArray::block_size)) == 0,
         "Block size not a multiple of start_array block");

  MutableSpace* space = object_space();

  HeapWord* begin = space->bottom() + block_index * block_word_size;
  HeapWord* end = MIN2(space->top(), begin + block_word_size);

  if (!start_array()->object_starts_in_range(begin, end)) {
    return;
  }

  // Get object starting at or reaching into this block.
  HeapWord* start = start_array()->object_start(begin);
  if (start < begin) {
    start += cast_to_oop(start)->size();
  }
  assert(start >= begin,
         "Object address " PTR_FORMAT " must be greater than or equal to block address at " PTR_FORMAT,
         p2i(start), p2i(begin));
  // Iterate all objects until the end.
  for (HeapWord* p = start; p < end; p += cast_to_oop(p)->size()) {
    cl->do_object(cast_to_oop(p));
  }
}

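// [Editor's note: illustrative walk-through, not part of the upstream file.]
// Example of the straddling-object handling above: suppose an object's
// header sits one word before `begin` and the object is 10 words long.
// object_start(begin) returns that earlier header address; the
// `start += cast_to_oop(start)->size()` step then advances 10 words,
// landing on the first header at or after `begin`.  A straddling object
// is therefore visited only by the block that contains its header, and
// no object is visited twice across blocks.
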
bool PSOldGen::expand_for_allocate(size_t word_size) {
  assert(word_size > 0, "allocating zero words?");
  bool result = true;
  {
    MutexLocker x(ExpandHeap_lock);
    // Avoid "expand storms" by rechecking available space after obtaining
    // the lock, because another thread may have already made sufficient
    // space available.  If insufficient space available, that will remain
    // true until we expand, since we have the lock.  Other threads may take
    // the space we need before we can allocate it, regardless of whether we
    // expand.  That's okay, we'll just try expanding again.
    if (object_space()->needs_expand(word_size)) {
      result = expand(word_size*HeapWordSize);
    }
  }
  if (GCExpandToAllocateDelayMillis > 0) {
    os::naked_sleep(GCExpandToAllocateDelayMillis);
  }
  return result;
}

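// [Editor's note: illustrative sketch of the pattern above, not part of
// the upstream file; the names are hypothetical.]  The recheck-under-lock
// shape is the standard way to avoid redundant expansions:
//
//   {
//     MutexLocker x(Some_lock);
//     if (still_needed()) {   // may have been satisfied while we waited
//       do_expensive_grow();
//     }
//   }
//
// Only the first thread through the lock pays for the grow; threads that
// were queued behind it observe still_needed() == false and fall through.
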
bool PSOldGen::expand(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  assert(bytes > 0, "precondition");
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes = align_up(bytes, alignment);
  size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment);

  if (UseNUMA) {
    // With NUMA we use round-robin page allocation for the old gen.  Expand
    // by at least one page per lgroup; the alignment is at least the page size.
    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
  }
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  A call to expand
    // implies a best effort to expand by "bytes" but not a guarantee.  Align
    // down to give a best effort.  This is likely the most that the generation
    // can expand since it has some capacity to start with.
    aligned_bytes = align_down(bytes, alignment);
  }

  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  if (success && GCLocker::is_active_and_needs_gc()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }
  return success;
}

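// [Editor's note: illustrative arithmetic, not part of the upstream file.]
// The wrap handled by the `aligned_bytes == 0` branch above: align_up
// rounds up to the next multiple of the alignment, so with a 512K
// alignment a pathological request like bytes == SIZE_MAX - 100 overflows
// to 0.  Falling back to align_down(bytes, alignment) yields the largest
// representable aligned value instead, preserving the "best effort"
// contract stated in the comment.
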
bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  assert(bytes > 0, "precondition");
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area.  The memregion spans
      // end -> new_end, and we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
      // the space is available for allocation after post_resize().
      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
      assert(object_space()->end() < virtual_space_high,
             "Should be true before post_resize()");
      MemRegion mangle_region(object_space()->end(), virtual_space_high);
      // Note that the object space has not yet been updated to
      // coincide with the new underlying virtual space.
      SpaceMangler::mangle_region(mangle_region);
    }
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    log_debug(gc)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}

bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = false;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) log_warning(gc)("grow to reserve failed"));
  }
  return result;
}

void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  // Shrink by the aligned size, not the raw request, so the virtual space
  // stays aligned and the log reports what was actually uncommitted.
  size_t size = align_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    virtual_space()->shrink_by(size);
    post_resize();

    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size + size;
    log_debug(gc)("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, size/K, new_mem_size/K);
  }
}

void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
    new_size = max_gen_size();
  }
  // Adjust according to our min and max
  new_size = clamp(new_size, min_gen_size(), max_gen_size());

  assert(max_gen_size() >= reserved().byte_size(), "max new size problem?");
  new_size = align_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: "
                      "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
                      " new size: " SIZE_FORMAT " current size: " SIZE_FORMAT
                      " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
                      desired_free_space, used_in_bytes(), new_size, current_size,
                      max_gen_size(), min_gen_size());

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    MutexLocker x(ExpandHeap_lock);
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: collection: %d (" SIZE_FORMAT ") -> (" SIZE_FORMAT ")",
                      ParallelScavengeHeap::heap()->total_collections(),
                      size_before,
                      virtual_space()->committed_size());
}

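// [Editor's note: illustrative arithmetic, not part of the upstream file;
// the sizes are made up.]  Example of the sizing above with
// min_gen_size() == 64M and max_gen_size() == 1G: used_in_bytes() == 300M
// and desired_free_space == 100M give new_size == 400M, which clamp()
// leaves alone; a desired free space of 900M would give 1200M, which
// clamp() caps at 1G.  The overflow check before the clamp covers the
// separate case where the addition itself wraps around size_t.
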
// NOTE! We need to be careful about resizing.  During a GC, multiple
// allocators may be active during heap expansion.  If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a memregion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
                          (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  ParallelScavengeHeap::heap()->card_table()->resize_covered_region(new_memregion);

  WorkGang* workers = Thread::current()->is_VM_thread() ?
                      &ParallelScavengeHeap::heap()->workers() : NULL;

  // The update of the space's end is done by this call.  As that
  // makes the new space available for concurrent allocation, this
  // must be the last step when expanding.
  object_space()->initialize(new_memregion,
                             SpaceDecorator::DontClear,
                             SpaceDecorator::DontMangle,
                             MutableSpace::SetupPages,
                             workers);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
         "Sanity");
}

void PSOldGen::print() const { print_on(tty); }

void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
            capacity_in_bytes()/K, used_in_bytes()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(virtual_space()->low_boundary()),
               p2i(virtual_space()->high()),
               p2i(virtual_space()->high_boundary()));

  st->print(" object"); object_space()->print_on(st);
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

void PSOldGen::verify() {
  object_space()->verify();
}

class VerifyObjectStartArrayClosure : public ObjectClosure {
  PSOldGen* _old_gen;
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(PSOldGen* old_gen, ObjectStartArray* start_array) :
    _old_gen(old_gen), _start_array(start_array) { }

  virtual void do_object(oop obj) {
    HeapWord* test_addr = cast_from_oop<HeapWord*>(obj) + 1;
    guarantee(_start_array->object_start(test_addr) == cast_from_oop<HeapWord*>(obj), "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated(cast_from_oop<HeapWord*>(obj)), "ObjectStartArray missing block allocation");
  }
};

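// [Editor's note: illustrative explanation, not part of the upstream file.]
// Probing at obj + 1 rather than obj exercises the interesting case: any
// address strictly inside an object must map back to that object's header.
// For a 5-word object at address A, object_start(A + 1) must return A; if
// the start array were stale, it could return some earlier or later block
// start instead, and the first guarantee above would fire.
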
void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check(this, &_start_array);
  object_iterate(&check);
}

#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  object_space()->set_top_for_allocations();
}
#endif