PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/gc/parallel/psParallelCompact.cpp
/*
 * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psRootType.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/oopStorageSetParState.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workgroup.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/instanceClassLoaderKlass.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

#include <math.h>

// All sizes are in HeapWords.
const size_t ParallelCompactData::Log2RegionSize = 16; // 64K words
const size_t ParallelCompactData::RegionSize = (size_t)1 << Log2RegionSize;
const size_t ParallelCompactData::RegionSizeBytes =
  RegionSize << LogHeapWordSize;
const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
const size_t ParallelCompactData::RegionAddrMask = ~RegionAddrOffsetMask;

const size_t ParallelCompactData::Log2BlockSize = 7; // 128 words
const size_t ParallelCompactData::BlockSize = (size_t)1 << Log2BlockSize;
const size_t ParallelCompactData::BlockSizeBytes =
  BlockSize << LogHeapWordSize;
const size_t ParallelCompactData::BlockSizeOffsetMask = BlockSize - 1;
const size_t ParallelCompactData::BlockAddrOffsetMask = BlockSizeBytes - 1;
const size_t ParallelCompactData::BlockAddrMask = ~BlockAddrOffsetMask;

const size_t ParallelCompactData::BlocksPerRegion = RegionSize / BlockSize;
const size_t ParallelCompactData::Log2BlocksPerRegion =
  Log2RegionSize - Log2BlockSize;
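
// Illustrative sketch (standalone; not part of the original HotSpot sources):
// how the shift/mask constants above translate a word offset into a region
// index, an offset within the region, and a block index within the region.
// The numbers are invented; the block is guarded out so it has no effect here.
#if 0
#include <cassert>
#include <cstddef>

int main() {
  const size_t Log2RegionSize = 16;                    // 64K words per region
  const size_t RegionSize     = size_t(1) << Log2RegionSize;
  const size_t Log2BlockSize  = 7;                     // 128 words per block
  const size_t BlockSize      = size_t(1) << Log2BlockSize;

  const size_t word_offset = 3 * RegionSize + 4 * BlockSize + 5;
  assert((word_offset >> Log2RegionSize) == 3);                      // region index
  assert((word_offset & (RegionSize - 1)) == 4 * BlockSize + 5);     // offset in region
  assert(((word_offset & (RegionSize - 1)) >> Log2BlockSize) == 4);  // block within region
  return 0;
}
#endif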

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_shift = 27;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::los_mask = ~dc_mask;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;

SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];

SpanSubjectToDiscoveryClosure PSParallelCompact::_span_based_discoverer;
ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;

double PSParallelCompact::_dwl_mean;
double PSParallelCompact::_dwl_std_dev;
double PSParallelCompact::_dwl_first_term;
double PSParallelCompact::_dwl_adjustment;
#ifdef ASSERT
bool PSParallelCompact::_dwl_initialized = false;
#endif  // #ifdef ASSERT

void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
                       HeapWord* destination)
{
  assert(src_region_idx != 0, "invalid src_region_idx");
  assert(partial_obj_size != 0, "invalid partial_obj_size argument");
  assert(destination != NULL, "invalid destination argument");

  _src_region_idx = src_region_idx;
  _partial_obj_size = partial_obj_size;
  _destination = destination;

  // These fields may not be updated below, so make sure they're clear.
  assert(_dest_region_addr == NULL, "should have been cleared");
  assert(_first_src_addr == NULL, "should have been cleared");

  // Determine the number of destination regions for the partial object.
  HeapWord* const last_word = destination + partial_obj_size - 1;
  const ParallelCompactData& sd = PSParallelCompact::summary_data();
  HeapWord* const beg_region_addr = sd.region_align_down(destination);
  HeapWord* const end_region_addr = sd.region_align_down(last_word);

  if (beg_region_addr == end_region_addr) {
    // One destination region.
    _destination_count = 1;
    if (end_region_addr == destination) {
      // The destination falls on a region boundary, thus the first word of the
      // partial object will be the first word copied to the destination region.
      _dest_region_addr = end_region_addr;
      _first_src_addr = sd.region_to_addr(src_region_idx);
    }
  } else {
    // Two destination regions. When copied, the partial object will cross a
    // destination region boundary, so a word somewhere within the partial
    // object will be the first word copied to the second destination region.
    _destination_count = 2;
    _dest_region_addr = end_region_addr;
    const size_t ofs = pointer_delta(end_region_addr, destination);
    assert(ofs < _partial_obj_size, "sanity");
    _first_src_addr = sd.region_to_addr(src_region_idx) + ofs;
  }
}
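
// Illustrative sketch (standalone; not part of the original HotSpot sources):
// how record() above decides between one and two destination regions for the
// partial object, using the same region_align_down arithmetic. A 64K-word
// RegionSize is assumed and addresses are treated as plain word offsets.
#if 0
#include <cassert>
#include <cstddef>

int main() {
  const size_t RegionSize = size_t(1) << 16;   // words per region
  auto region_align_down = [&](size_t addr) { return addr & ~(RegionSize - 1); };

  // Case 1: destination and the last word of the partial object fall in the
  // same region -> one destination region.
  size_t destination = 2 * RegionSize + 100;
  size_t partial_obj_size = 50;
  size_t last_word = destination + partial_obj_size - 1;
  assert(region_align_down(destination) == region_align_down(last_word));

  // Case 2: the partial object runs past the next region boundary -> two
  // destination regions; the word at the boundary is the first word copied
  // into the second destination region.
  partial_obj_size = RegionSize;
  last_word = destination + partial_obj_size - 1;
  assert(region_align_down(destination) != region_align_down(last_word));
  return 0;
}
#endif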

void SplitInfo::clear()
{
  _src_region_idx = 0;
  _partial_obj_size = 0;
  _destination = NULL;
  _destination_count = 0;
  _dest_region_addr = NULL;
  _first_src_addr = NULL;
  assert(!is_valid(), "sanity");
}

#ifdef ASSERT
void SplitInfo::verify_clear()
{
  assert(_src_region_idx == 0, "not clear");
  assert(_partial_obj_size == 0, "not clear");
  assert(_destination == NULL, "not clear");
  assert(_destination_count == 0, "not clear");
  assert(_dest_region_addr == NULL, "not clear");
  assert(_first_src_addr == NULL, "not clear");
}
#endif  // #ifdef ASSERT
210
211
212
void PSParallelCompact::print_on_error(outputStream* st) {
213
_mark_bitmap.print_on_error(st);
214
}
215
216
#ifndef PRODUCT
217
const char* PSParallelCompact::space_names[] = {
218
"old ", "eden", "from", "to "
219
};
220
221
void PSParallelCompact::print_region_ranges() {
222
if (!log_develop_is_enabled(Trace, gc, compaction)) {
223
return;
224
}
225
Log(gc, compaction) log;
226
ResourceMark rm;
227
LogStream ls(log.trace());
228
Universe::print_on(&ls);
229
log.trace("space bottom top end new_top");
230
log.trace("------ ---------- ---------- ---------- ----------");
231
232
for (unsigned int id = 0; id < last_space_id; ++id) {
233
const MutableSpace* space = _space_info[id].space();
234
log.trace("%u %s "
235
SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
236
SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
237
id, space_names[id],
238
summary_data().addr_to_region_idx(space->bottom()),
239
summary_data().addr_to_region_idx(space->top()),
240
summary_data().addr_to_region_idx(space->end()),
241
summary_data().addr_to_region_idx(_space_info[id].new_top()));
242
}
243
}
244
245
void
246
print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
247
{
248
#define REGION_IDX_FORMAT SIZE_FORMAT_W(7)
249
#define REGION_DATA_FORMAT SIZE_FORMAT_W(5)
250
251
ParallelCompactData& sd = PSParallelCompact::summary_data();
252
size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
253
log_develop_trace(gc, compaction)(
254
REGION_IDX_FORMAT " " PTR_FORMAT " "
255
REGION_IDX_FORMAT " " PTR_FORMAT " "
256
REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
257
REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d",
258
i, p2i(c->data_location()), dci, p2i(c->destination()),
259
c->partial_obj_size(), c->live_obj_size(),
260
c->data_size(), c->source_region(), c->destination_count());
261
262
#undef REGION_IDX_FORMAT
263
#undef REGION_DATA_FORMAT
264
}
265
266
void
267
print_generic_summary_data(ParallelCompactData& summary_data,
268
HeapWord* const beg_addr,
269
HeapWord* const end_addr)
270
{
271
size_t total_words = 0;
272
size_t i = summary_data.addr_to_region_idx(beg_addr);
273
const size_t last = summary_data.addr_to_region_idx(end_addr);
274
HeapWord* pdest = 0;
275
276
while (i < last) {
277
ParallelCompactData::RegionData* c = summary_data.region(i);
278
if (c->data_size() != 0 || c->destination() != pdest) {
279
print_generic_summary_region(i, c);
280
total_words += c->data_size();
281
pdest = c->destination();
282
}
283
++i;
284
}
285
286
log_develop_trace(gc, compaction)("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
287
}
288
289
void
290
PSParallelCompact::print_generic_summary_data(ParallelCompactData& summary_data,
291
HeapWord* const beg_addr,
292
HeapWord* const end_addr) {
293
::print_generic_summary_data(summary_data, beg_addr, end_addr);
294
}
295
296
void
297
print_generic_summary_data(ParallelCompactData& summary_data,
298
SpaceInfo* space_info)
299
{
300
if (!log_develop_is_enabled(Trace, gc, compaction)) {
301
return;
302
}
303
304
for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
305
const MutableSpace* space = space_info[id].space();
306
print_generic_summary_data(summary_data, space->bottom(),
307
MAX2(space->top(), space_info[id].new_top()));
308
}
309
}
310
311
void
312
print_initial_summary_data(ParallelCompactData& summary_data,
313
const MutableSpace* space) {
314
if (space->top() == space->bottom()) {
315
return;
316
}
317
318
const size_t region_size = ParallelCompactData::RegionSize;
319
typedef ParallelCompactData::RegionData RegionData;
320
HeapWord* const top_aligned_up = summary_data.region_align_up(space->top());
321
const size_t end_region = summary_data.addr_to_region_idx(top_aligned_up);
322
const RegionData* c = summary_data.region(end_region - 1);
323
HeapWord* end_addr = c->destination() + c->data_size();
324
const size_t live_in_space = pointer_delta(end_addr, space->bottom());
325
326
// Print (and count) the full regions at the beginning of the space.
327
size_t full_region_count = 0;
328
size_t i = summary_data.addr_to_region_idx(space->bottom());
329
while (i < end_region && summary_data.region(i)->data_size() == region_size) {
330
ParallelCompactData::RegionData* c = summary_data.region(i);
331
log_develop_trace(gc, compaction)(
332
SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
333
i, p2i(c->destination()),
334
c->partial_obj_size(), c->live_obj_size(),
335
c->data_size(), c->source_region(), c->destination_count());
336
++full_region_count;
337
++i;
338
}
339
340
size_t live_to_right = live_in_space - full_region_count * region_size;
341
342
double max_reclaimed_ratio = 0.0;
343
size_t max_reclaimed_ratio_region = 0;
344
size_t max_dead_to_right = 0;
345
size_t max_live_to_right = 0;
346
347
// Print the 'reclaimed ratio' for regions while there is something live in
348
// the region or to the right of it. The remaining regions are empty (and
349
// uninteresting), and computing the ratio will result in division by 0.
350
while (i < end_region && live_to_right > 0) {
351
c = summary_data.region(i);
352
HeapWord* const region_addr = summary_data.region_to_addr(i);
353
const size_t used_to_right = pointer_delta(space->top(), region_addr);
354
const size_t dead_to_right = used_to_right - live_to_right;
355
const double reclaimed_ratio = double(dead_to_right) / live_to_right;
356
357
if (reclaimed_ratio > max_reclaimed_ratio) {
358
max_reclaimed_ratio = reclaimed_ratio;
359
max_reclaimed_ratio_region = i;
360
max_dead_to_right = dead_to_right;
361
max_live_to_right = live_to_right;
362
}
363
364
ParallelCompactData::RegionData* c = summary_data.region(i);
365
log_develop_trace(gc, compaction)(
366
SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d"
367
"%12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
368
i, p2i(c->destination()),
369
c->partial_obj_size(), c->live_obj_size(),
370
c->data_size(), c->source_region(), c->destination_count(),
371
reclaimed_ratio, dead_to_right, live_to_right);
372
373
374
live_to_right -= c->data_size();
375
++i;
376
}
377
378
// Any remaining regions are empty. Print one more if there is one.
379
if (i < end_region) {
380
ParallelCompactData::RegionData* c = summary_data.region(i);
381
log_develop_trace(gc, compaction)(
382
SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
383
i, p2i(c->destination()),
384
c->partial_obj_size(), c->live_obj_size(),
385
c->data_size(), c->source_region(), c->destination_count());
386
}
387
388
log_develop_trace(gc, compaction)("max: " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
389
max_reclaimed_ratio_region, max_dead_to_right, max_live_to_right, max_reclaimed_ratio);
390
}
391
392
void
393
print_initial_summary_data(ParallelCompactData& summary_data,
394
SpaceInfo* space_info) {
395
if (!log_develop_is_enabled(Trace, gc, compaction)) {
396
return;
397
}
398
399
unsigned int id = PSParallelCompact::old_space_id;
400
const MutableSpace* space;
401
do {
402
space = space_info[id].space();
403
print_initial_summary_data(summary_data, space);
404
} while (++id < PSParallelCompact::eden_space_id);
405
406
do {
407
space = space_info[id].space();
408
print_generic_summary_data(summary_data, space->bottom(), space->top());
409
} while (++id < PSParallelCompact::last_space_id);
410
}
411
#endif // #ifndef PRODUCT
412
413
ParallelCompactData::ParallelCompactData() :
414
_region_start(NULL),
415
DEBUG_ONLY(_region_end(NULL) COMMA)
416
_region_vspace(NULL),
417
_reserved_byte_size(0),
418
_region_data(NULL),
419
_region_count(0),
420
_block_vspace(NULL),
421
_block_data(NULL),
422
_block_count(0) {}
423
424
bool ParallelCompactData::initialize(MemRegion covered_region)
425
{
426
_region_start = covered_region.start();
427
const size_t region_size = covered_region.word_size();
428
DEBUG_ONLY(_region_end = _region_start + region_size;)
429
430
assert(region_align_down(_region_start) == _region_start,
431
"region start not aligned");
432
assert((region_size & RegionSizeOffsetMask) == 0,
433
"region size not a multiple of RegionSize");
434
435
bool result = initialize_region_data(region_size) && initialize_block_data();
436
return result;
437
}
438
439
PSVirtualSpace*
440
ParallelCompactData::create_vspace(size_t count, size_t element_size)
441
{
442
const size_t raw_bytes = count * element_size;
443
const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
444
const size_t granularity = os::vm_allocation_granularity();
445
_reserved_byte_size = align_up(raw_bytes, MAX2(page_sz, granularity));
446
447
const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
448
MAX2(page_sz, granularity);
449
ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
450
os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, page_sz, rs.base(),
451
rs.size());
452
453
MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
454
455
PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
456
if (vspace != 0) {
457
if (vspace->expand_by(_reserved_byte_size)) {
458
return vspace;
459
}
460
delete vspace;
461
// Release memory reserved in the space.
462
rs.release();
463
}
464
465
return 0;
466
}
467
468
bool ParallelCompactData::initialize_region_data(size_t region_size)
469
{
470
const size_t count = (region_size + RegionSizeOffsetMask) >> Log2RegionSize;
471
_region_vspace = create_vspace(count, sizeof(RegionData));
472
if (_region_vspace != 0) {
473
_region_data = (RegionData*)_region_vspace->reserved_low_addr();
474
_region_count = count;
475
return true;
476
}
477
return false;
478
}
479
480
bool ParallelCompactData::initialize_block_data()
481
{
482
assert(_region_count != 0, "region data must be initialized first");
483
const size_t count = _region_count << Log2BlocksPerRegion;
484
_block_vspace = create_vspace(count, sizeof(BlockData));
485
if (_block_vspace != 0) {
486
_block_data = (BlockData*)_block_vspace->reserved_low_addr();
487
_block_count = count;
488
return true;
489
}
490
return false;
491
}
492
493
void ParallelCompactData::clear()
494
{
495
memset(_region_data, 0, _region_vspace->committed_size());
496
memset(_block_data, 0, _block_vspace->committed_size());
497
}
498
499
void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
500
assert(beg_region <= _region_count, "beg_region out of range");
501
assert(end_region <= _region_count, "end_region out of range");
502
assert(RegionSize % BlockSize == 0, "RegionSize not a multiple of BlockSize");
503
504
const size_t region_cnt = end_region - beg_region;
505
memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
506
507
const size_t beg_block = beg_region * BlocksPerRegion;
508
const size_t block_cnt = region_cnt * BlocksPerRegion;
509
memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData));
510
}
511
512
HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const
513
{
514
const RegionData* cur_cp = region(region_idx);
515
const RegionData* const end_cp = region(region_count() - 1);
516
517
HeapWord* result = region_to_addr(region_idx);
518
if (cur_cp < end_cp) {
519
do {
520
result += cur_cp->partial_obj_size();
521
} while (cur_cp->partial_obj_size() == RegionSize && ++cur_cp < end_cp);
522
}
523
return result;
524
}
525
526
void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
527
{
528
const size_t obj_ofs = pointer_delta(addr, _region_start);
529
const size_t beg_region = obj_ofs >> Log2RegionSize;
530
// end_region is inclusive
531
const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
532
533
if (beg_region == end_region) {
534
// All in one region.
535
_region_data[beg_region].add_live_obj(len);
536
return;
537
}
538
539
// First region.
540
const size_t beg_ofs = region_offset(addr);
541
_region_data[beg_region].add_live_obj(RegionSize - beg_ofs);
542
543
// Middle regions--completely spanned by this object.
544
for (size_t region = beg_region + 1; region < end_region; ++region) {
545
_region_data[region].set_partial_obj_size(RegionSize);
546
_region_data[region].set_partial_obj_addr(addr);
547
}
548
549
// Last region.
550
const size_t end_ofs = region_offset(addr + len - 1);
551
_region_data[end_region].set_partial_obj_size(end_ofs + 1);
552
_region_data[end_region].set_partial_obj_addr(addr);
553
}
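
// Illustrative sketch (standalone; not part of the original HotSpot sources):
// how add_obj() above apportions an object's words among the first, middle and
// last regions it touches. A 64K-word RegionSize is assumed; offsets are word
// offsets from _region_start and the object size is invented.
#if 0
#include <cassert>
#include <cstddef>

int main() {
  const size_t Log2RegionSize = 16;
  const size_t RegionSize = size_t(1) << Log2RegionSize;

  const size_t obj_ofs = RegionSize - 10;      // starts 10 words before a boundary
  const size_t len     = RegionSize + 30;      // spans one full region plus 20 words

  const size_t beg_region = obj_ofs >> Log2RegionSize;               // 0
  const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;   // 2 (inclusive)
  assert(beg_region == 0 && end_region == 2);

  const size_t first_part  = RegionSize - (obj_ofs & (RegionSize - 1));      // 10 -> add_live_obj
  const size_t middle_part = RegionSize;                                     // region 1 -> partial_obj_size
  const size_t last_part   = ((obj_ofs + len - 1) & (RegionSize - 1)) + 1;   // 20 -> partial_obj_size
  assert(first_part + middle_part + last_part == len);
  return 0;
}
#endif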
554
555
void
556
ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
557
{
558
assert(is_region_aligned(beg), "not RegionSize aligned");
559
assert(is_region_aligned(end), "not RegionSize aligned");
560
561
size_t cur_region = addr_to_region_idx(beg);
562
const size_t end_region = addr_to_region_idx(end);
563
HeapWord* addr = beg;
564
while (cur_region < end_region) {
565
_region_data[cur_region].set_destination(addr);
566
_region_data[cur_region].set_destination_count(0);
567
_region_data[cur_region].set_source_region(cur_region);
568
_region_data[cur_region].set_data_location(addr);
569
570
// Update live_obj_size so the region appears completely full.
571
size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
572
_region_data[cur_region].set_live_obj_size(live_size);
573
574
++cur_region;
575
addr += RegionSize;
576
}
577
}
578
579
// Find the point at which a space can be split and, if necessary, record the
580
// split point.
581
//
582
// If the current src region (which overflowed the destination space) doesn't
583
// have a partial object, the split point is at the beginning of the current src
584
// region (an "easy" split, no extra bookkeeping required).
585
//
586
// If the current src region has a partial object, the split point is in the
587
// region where that partial object starts (call it the split_region). If
588
// split_region has a partial object, then the split point is just after that
589
// partial object (a "hard" split where we have to record the split data and
590
// zero the partial_obj_size field). With a "hard" split, we know that the
591
// partial_obj ends within split_region because the partial object that caused
592
// the overflow starts in split_region. If split_region doesn't have a partial
593
// obj, then the split is at the beginning of split_region (another "easy"
594
// split).
595
HeapWord*
596
ParallelCompactData::summarize_split_space(size_t src_region,
597
SplitInfo& split_info,
598
HeapWord* destination,
599
HeapWord* target_end,
600
HeapWord** target_next)
601
{
602
assert(destination <= target_end, "sanity");
603
assert(destination + _region_data[src_region].data_size() > target_end,
604
"region should not fit into target space");
605
assert(is_region_aligned(target_end), "sanity");
606
607
size_t split_region = src_region;
608
HeapWord* split_destination = destination;
609
size_t partial_obj_size = _region_data[src_region].partial_obj_size();
610
611
if (destination + partial_obj_size > target_end) {
612
// The split point is just after the partial object (if any) in the
613
// src_region that contains the start of the object that overflowed the
614
// destination space.
615
//
616
// Find the start of the "overflow" object and set split_region to the
617
// region containing it.
618
HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr();
619
split_region = addr_to_region_idx(overflow_obj);
620
621
// Clear the source_region field of all destination regions whose first word
622
// came from data after the split point (a non-null source_region field
623
// implies a region must be filled).
624
//
625
// An alternative to the simple loop below: clear during post_compact(),
626
// which uses memcpy instead of individual stores, and is easy to
627
// parallelize. (The downside is that it clears the entire RegionData
628
// object as opposed to just one field.)
629
//
630
// post_compact() would have to clear the summary data up to the highest
631
// address that was written during the summary phase, which would be
632
//
633
// max(top, max(new_top, clear_top))
634
//
635
// where clear_top is a new field in SpaceInfo. Would have to set clear_top
636
// to target_end.
637
const RegionData* const sr = region(split_region);
638
const size_t beg_idx =
639
addr_to_region_idx(region_align_up(sr->destination() +
640
sr->partial_obj_size()));
641
const size_t end_idx = addr_to_region_idx(target_end);
642
643
log_develop_trace(gc, compaction)("split: clearing source_region field in [" SIZE_FORMAT ", " SIZE_FORMAT ")", beg_idx, end_idx);
644
for (size_t idx = beg_idx; idx < end_idx; ++idx) {
645
_region_data[idx].set_source_region(0);
646
}
647
648
// Set split_destination and partial_obj_size to reflect the split region.
649
split_destination = sr->destination();
650
partial_obj_size = sr->partial_obj_size();
651
}
652
653
// The split is recorded only if a partial object extends onto the region.
654
if (partial_obj_size != 0) {
655
_region_data[split_region].set_partial_obj_size(0);
656
split_info.record(split_region, partial_obj_size, split_destination);
657
}
658
659
// Setup the continuation addresses.
660
*target_next = split_destination + partial_obj_size;
661
HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;
662
663
if (log_develop_is_enabled(Trace, gc, compaction)) {
664
const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
665
log_develop_trace(gc, compaction)("%s split: src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT,
666
split_type, p2i(source_next), split_region, partial_obj_size);
667
log_develop_trace(gc, compaction)("%s split: dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT " tn=" PTR_FORMAT,
668
split_type, p2i(split_destination),
669
addr_to_region_idx(split_destination),
670
p2i(*target_next));
671
672
if (partial_obj_size != 0) {
673
HeapWord* const po_beg = split_info.destination();
674
HeapWord* const po_end = po_beg + split_info.partial_obj_size();
675
log_develop_trace(gc, compaction)("%s split: po_beg=" PTR_FORMAT " " SIZE_FORMAT " po_end=" PTR_FORMAT " " SIZE_FORMAT,
676
split_type,
677
p2i(po_beg), addr_to_region_idx(po_beg),
678
p2i(po_end), addr_to_region_idx(po_end));
679
}
680
}
681
682
return source_next;
683
}
684
685
bool ParallelCompactData::summarize(SplitInfo& split_info,
686
HeapWord* source_beg, HeapWord* source_end,
687
HeapWord** source_next,
688
HeapWord* target_beg, HeapWord* target_end,
689
HeapWord** target_next)
690
{
691
HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
692
log_develop_trace(gc, compaction)(
693
"sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
694
"tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
695
p2i(source_beg), p2i(source_end), p2i(source_next_val),
696
p2i(target_beg), p2i(target_end), p2i(*target_next));
697
698
size_t cur_region = addr_to_region_idx(source_beg);
699
const size_t end_region = addr_to_region_idx(region_align_up(source_end));
700
701
HeapWord *dest_addr = target_beg;
702
while (cur_region < end_region) {
703
// The destination must be set even if the region has no data.
704
_region_data[cur_region].set_destination(dest_addr);
705
706
size_t words = _region_data[cur_region].data_size();
707
if (words > 0) {
708
// If cur_region does not fit entirely into the target space, find a point
709
// at which the source space can be 'split' so that part is copied to the
710
// target space and the rest is copied elsewhere.
711
if (dest_addr + words > target_end) {
712
assert(source_next != NULL, "source_next is NULL when splitting");
713
*source_next = summarize_split_space(cur_region, split_info, dest_addr,
714
target_end, target_next);
715
return false;
716
}
717
718
// Compute the destination_count for cur_region, and if necessary, update
719
// source_region for a destination region. The source_region field is
720
// updated if cur_region is the first (left-most) region to be copied to a
721
// destination region.
722
//
723
// The destination_count calculation is a bit subtle. A region that has
724
// data that compacts into itself does not count itself as a destination.
725
// This maintains the invariant that a zero count means the region is
726
// available and can be claimed and then filled.
727
uint destination_count = 0;
728
if (split_info.is_split(cur_region)) {
729
// The current region has been split: the partial object will be copied
730
// to one destination space and the remaining data will be copied to
731
// another destination space. Adjust the initial destination_count and,
732
// if necessary, set the source_region field if the partial object will
733
// cross a destination region boundary.
734
destination_count = split_info.destination_count();
735
if (destination_count == 2) {
736
size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr());
737
_region_data[dest_idx].set_source_region(cur_region);
738
}
739
}
740
741
HeapWord* const last_addr = dest_addr + words - 1;
742
const size_t dest_region_1 = addr_to_region_idx(dest_addr);
743
const size_t dest_region_2 = addr_to_region_idx(last_addr);
744
745
// Initially assume that the destination regions will be the same and
746
// adjust the value below if necessary. Under this assumption, if
747
// cur_region == dest_region_2, then cur_region will be compacted
748
// completely into itself.
749
destination_count += cur_region == dest_region_2 ? 0 : 1;
750
if (dest_region_1 != dest_region_2) {
751
// Destination regions differ; adjust destination_count.
752
destination_count += 1;
753
// Data from cur_region will be copied to the start of dest_region_2.
754
_region_data[dest_region_2].set_source_region(cur_region);
755
} else if (is_region_aligned(dest_addr)) {
756
// Data from cur_region will be copied to the start of the destination
757
// region.
758
_region_data[dest_region_1].set_source_region(cur_region);
759
}
760
761
_region_data[cur_region].set_destination_count(destination_count);
762
_region_data[cur_region].set_data_location(region_to_addr(cur_region));
763
dest_addr += words;
764
}
765
766
++cur_region;
767
}
768
769
*target_next = dest_addr;
770
return true;
771
}
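
// Illustrative sketch (standalone; not part of the original HotSpot sources):
// the destination_count bookkeeping used in summarize() above. A region that
// compacts entirely into itself contributes 0; data landing in other regions
// adds one per destination region touched. A 64K-word RegionSize is assumed
// and the region numbers and word counts are invented.
#if 0
#include <cassert>
#include <cstddef>

int main() {
  const size_t RegionSize = size_t(1) << 16;                 // words per region
  auto region_idx = [](size_t addr) { return addr >> 16; };  // addr is a word offset

  // cur_region 5 holds 100 live words destined for the middle of region 3:
  // one foreign destination region -> destination_count == 1.
  size_t cur_region = 5;
  size_t dest_addr  = 3 * RegionSize + 1000;
  size_t words      = 100;
  size_t d1 = region_idx(dest_addr);
  size_t d2 = region_idx(dest_addr + words - 1);
  size_t destination_count = (cur_region == d2 ? 0 : 1) + (d1 != d2 ? 1 : 0);
  assert(destination_count == 1);

  // The same data straddling a destination region boundary fills parts of two
  // destination regions -> destination_count == 2.
  dest_addr = 4 * RegionSize - 50;
  d1 = region_idx(dest_addr);
  d2 = region_idx(dest_addr + words - 1);
  destination_count = (cur_region == d2 ? 0 : 1) + (d1 != d2 ? 1 : 0);
  assert(destination_count == 2);
  return 0;
}
#endif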
772
773
HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) const {
774
assert(addr != NULL, "Should detect NULL oop earlier");
775
assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap");
776
assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");
777
778
// Region covering the object.
779
RegionData* const region_ptr = addr_to_region_ptr(addr);
780
HeapWord* result = region_ptr->destination();
781
782
// If the entire Region is live, the new location is region->destination + the
783
// offset of the object within the Region.
784
785
// Run some performance tests to determine if this special case pays off. It
786
// is worth it for pointers into the dense prefix. If the optimization to
787
// avoid pointer updates in regions that only point to the dense prefix is
788
// ever implemented, this should be revisited.
789
if (region_ptr->data_size() == RegionSize) {
790
result += region_offset(addr);
791
return result;
792
}
793
794
// Otherwise, the new location is region->destination + block offset + the
795
// number of live words in the Block that are (a) to the left of addr and (b)
796
// due to objects that start in the Block.
797
798
// Fill in the block table if necessary. This is unsynchronized, so multiple
799
// threads may fill the block table for a region (harmless, since it is
800
// idempotent).
801
if (!region_ptr->blocks_filled()) {
802
PSParallelCompact::fill_blocks(addr_to_region_idx(addr));
803
region_ptr->set_blocks_filled();
804
}
805
806
HeapWord* const search_start = block_align_down(addr);
807
const size_t block_offset = addr_to_block_ptr(addr)->offset();
808
809
const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
810
const size_t live = bitmap->live_words_in_range(cm, search_start, cast_to_oop(addr));
811
result += block_offset + live;
812
DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result));
813
return result;
814
}
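
// Illustrative sketch (standalone; not part of the original HotSpot sources):
// the two cases computed by calc_new_pointer() above. A fully live region maps
// addr by its offset within the region alone; otherwise the block table plus a
// live-word count supply the extra offset. All numbers below are invented.
#if 0
#include <cassert>
#include <cstddef>

int main() {
  const size_t destination = 0x20000;   // region's compaction destination (word offset)

  // Case 1: region completely live -> new = destination + offset-in-region.
  const size_t region_offset = 1234;
  assert(destination + region_offset == 0x20000 + 1234);

  // Case 2: region partly live -> new = destination
  //   + block offset (live words in the region before addr's block)
  //   + live words in the block to the left of addr.
  const size_t block_offset     = 300;
  const size_t live_before_addr = 17;
  assert(destination + block_offset + live_before_addr == 0x20000 + 317);
  return 0;
}
#endif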
815
816
#ifdef ASSERT
817
void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
818
{
819
const size_t* const beg = (const size_t*)vspace->committed_low_addr();
820
const size_t* const end = (const size_t*)vspace->committed_high_addr();
821
for (const size_t* p = beg; p < end; ++p) {
822
assert(*p == 0, "not zero");
823
}
824
}
825
826
void ParallelCompactData::verify_clear()
827
{
828
verify_clear(_region_vspace);
829
verify_clear(_block_vspace);
830
}
831
#endif // #ifdef ASSERT
832
833
STWGCTimer PSParallelCompact::_gc_timer;
834
ParallelOldTracer PSParallelCompact::_gc_tracer;
835
elapsedTimer PSParallelCompact::_accumulated_time;
836
unsigned int PSParallelCompact::_total_invocations = 0;
837
unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0;
838
CollectorCounters* PSParallelCompact::_counters = NULL;
839
ParMarkBitMap PSParallelCompact::_mark_bitmap;
840
ParallelCompactData PSParallelCompact::_summary_data;
841
842
PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
843
844
bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
845
846
class PCReferenceProcessor: public ReferenceProcessor {
847
public:
848
PCReferenceProcessor(
849
BoolObjectClosure* is_subject_to_discovery,
850
BoolObjectClosure* is_alive_non_header) :
851
ReferenceProcessor(is_subject_to_discovery,
852
ParallelGCThreads, // mt processing degree
853
true, // mt discovery
854
ParallelGCThreads, // mt discovery degree
855
true, // atomic_discovery
856
is_alive_non_header) {
857
}
858
859
template<typename T> bool discover(oop obj, ReferenceType type) {
860
T* referent_addr = (T*) java_lang_ref_Reference::referent_addr_raw(obj);
861
T heap_oop = RawAccess<>::oop_load(referent_addr);
862
oop referent = CompressedOops::decode_not_null(heap_oop);
863
return PSParallelCompact::mark_bitmap()->is_unmarked(referent)
864
&& ReferenceProcessor::discover_reference(obj, type);
865
}
866
virtual bool discover_reference(oop obj, ReferenceType type) {
867
if (UseCompressedOops) {
868
return discover<narrowOop>(obj, type);
869
} else {
870
return discover<oop>(obj, type);
871
}
872
}
873
};
874
875
void PSParallelCompact::post_initialize() {
876
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
877
_span_based_discoverer.set_span(heap->reserved_region());
878
_ref_processor =
879
new PCReferenceProcessor(&_span_based_discoverer,
880
&_is_alive_closure); // non-header is alive closure
881
882
_counters = new CollectorCounters("Parallel full collection pauses", 1);
883
884
// Initialize static fields in ParCompactionManager.
885
ParCompactionManager::initialize(mark_bitmap());
886
}
887
888
bool PSParallelCompact::initialize() {
889
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
890
MemRegion mr = heap->reserved_region();
891
892
// Was the old gen allocated successfully?
893
if (!heap->old_gen()->is_allocated()) {
894
return false;
895
}
896
897
initialize_space_info();
898
initialize_dead_wood_limiter();
899
900
if (!_mark_bitmap.initialize(mr)) {
901
vm_shutdown_during_initialization(
902
err_msg("Unable to allocate " SIZE_FORMAT "KB bitmaps for parallel "
903
"garbage collection for the requested " SIZE_FORMAT "KB heap.",
904
_mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
905
return false;
906
}
907
908
if (!_summary_data.initialize(mr)) {
909
vm_shutdown_during_initialization(
910
err_msg("Unable to allocate " SIZE_FORMAT "KB card tables for parallel "
911
"garbage collection for the requested " SIZE_FORMAT "KB heap.",
912
_summary_data.reserved_byte_size()/K, mr.byte_size()/K));
913
return false;
914
}
915
916
return true;
917
}
918
919
void PSParallelCompact::initialize_space_info()
920
{
921
memset(&_space_info, 0, sizeof(_space_info));
922
923
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
924
PSYoungGen* young_gen = heap->young_gen();
925
926
_space_info[old_space_id].set_space(heap->old_gen()->object_space());
927
_space_info[eden_space_id].set_space(young_gen->eden_space());
928
_space_info[from_space_id].set_space(young_gen->from_space());
929
_space_info[to_space_id].set_space(young_gen->to_space());
930
931
_space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
932
}
933
934
void PSParallelCompact::initialize_dead_wood_limiter()
935
{
936
const size_t max = 100;
937
_dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
938
_dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
939
_dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
940
DEBUG_ONLY(_dwl_initialized = true;)
941
_dwl_adjustment = normal_distribution(1.0);
942
}
943
944
void
945
PSParallelCompact::clear_data_covering_space(SpaceId id)
946
{
947
// At this point, top is the value before GC, new_top() is the value that will
948
// be set at the end of GC. The marking bitmap is cleared to top; nothing
949
// should be marked above top. The summary data is cleared to the larger of
950
// top & new_top.
951
MutableSpace* const space = _space_info[id].space();
952
HeapWord* const bot = space->bottom();
953
HeapWord* const top = space->top();
954
HeapWord* const max_top = MAX2(top, _space_info[id].new_top());
955
956
const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
957
const idx_t end_bit = _mark_bitmap.align_range_end(_mark_bitmap.addr_to_bit(top));
958
_mark_bitmap.clear_range(beg_bit, end_bit);
959
960
const size_t beg_region = _summary_data.addr_to_region_idx(bot);
961
const size_t end_region =
962
_summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
963
_summary_data.clear_range(beg_region, end_region);
964
965
// Clear the data used to 'split' regions.
966
SplitInfo& split_info = _space_info[id].split_info();
967
if (split_info.is_valid()) {
968
split_info.clear();
969
}
970
DEBUG_ONLY(split_info.verify_clear();)
971
}
972
973
void PSParallelCompact::pre_compact()
974
{
975
// Update the from & to space pointers in space_info, since they are swapped
976
// at each young gen gc. Do the update unconditionally (even though a
977
// promotion failure does not swap spaces) because an unknown number of young
978
// collections will have swapped the spaces an unknown number of times.
979
GCTraceTime(Debug, gc, phases) tm("Pre Compact", &_gc_timer);
980
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
981
_space_info[from_space_id].set_space(heap->young_gen()->from_space());
982
_space_info[to_space_id].set_space(heap->young_gen()->to_space());
983
984
// Increment the invocation count
985
heap->increment_total_collections(true);
986
987
// We need to track unique mark sweep invocations as well.
988
_total_invocations++;
989
990
heap->print_heap_before_gc();
991
heap->trace_heap_before_gc(&_gc_tracer);
992
993
// Fill in TLABs
994
heap->ensure_parsability(true); // retire TLABs
995
996
if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
997
Universe::verify("Before GC");
998
}
999
1000
// Verify object start arrays
1001
if (VerifyObjectStartArray &&
1002
VerifyBeforeGC) {
1003
heap->old_gen()->verify_object_start_array();
1004
}
1005
1006
DEBUG_ONLY(mark_bitmap()->verify_clear();)
1007
DEBUG_ONLY(summary_data().verify_clear();)
1008
1009
ParCompactionManager::reset_all_bitmap_query_caches();
1010
}
1011
1012
void PSParallelCompact::post_compact()
1013
{
1014
GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);
1015
ParCompactionManager::remove_all_shadow_regions();
1016
1017
for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1018
// Clear the marking bitmap, summary data and split info.
1019
clear_data_covering_space(SpaceId(id));
1020
// Update top(). Must be done after clearing the bitmap and summary data.
1021
_space_info[id].publish_new_top();
1022
}
1023
1024
MutableSpace* const eden_space = _space_info[eden_space_id].space();
1025
MutableSpace* const from_space = _space_info[from_space_id].space();
1026
MutableSpace* const to_space = _space_info[to_space_id].space();
1027
1028
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1029
bool eden_empty = eden_space->is_empty();
1030
1031
// Update heap occupancy information which is used as input to the soft ref
1032
// clearing policy at the next gc.
1033
Universe::heap()->update_capacity_and_used_at_gc();
1034
1035
bool young_gen_empty = eden_empty && from_space->is_empty() &&
1036
to_space->is_empty();
1037
1038
PSCardTable* ct = heap->card_table();
1039
MemRegion old_mr = heap->old_gen()->reserved();
1040
if (young_gen_empty) {
1041
ct->clear(MemRegion(old_mr.start(), old_mr.end()));
1042
} else {
1043
ct->invalidate(MemRegion(old_mr.start(), old_mr.end()));
1044
}
1045
1046
// Delete metaspaces for unloaded class loaders and clean up loader_data graph
1047
ClassLoaderDataGraph::purge(/*at_safepoint*/true);
1048
DEBUG_ONLY(MetaspaceUtils::verify();)
1049
1050
heap->prune_scavengable_nmethods();
1051
1052
#if COMPILER2_OR_JVMCI
1053
DerivedPointerTable::update_pointers();
1054
#endif
1055
1056
if (ZapUnusedHeapArea) {
1057
heap->gen_mangle_unused_area();
1058
}
1059
1060
// Signal that we have completed a visit to all live objects.
1061
Universe::heap()->record_whole_heap_examined_timestamp();
1062
}
1063
1064
HeapWord*
1065
PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
1066
bool maximum_compaction)
1067
{
1068
const size_t region_size = ParallelCompactData::RegionSize;
1069
const ParallelCompactData& sd = summary_data();
1070
1071
const MutableSpace* const space = _space_info[id].space();
1072
HeapWord* const top_aligned_up = sd.region_align_up(space->top());
1073
const RegionData* const beg_cp = sd.addr_to_region_ptr(space->bottom());
1074
const RegionData* const end_cp = sd.addr_to_region_ptr(top_aligned_up);
1075
1076
// Skip full regions at the beginning of the space--they are necessarily part
1077
// of the dense prefix.
1078
size_t full_count = 0;
1079
const RegionData* cp;
1080
for (cp = beg_cp; cp < end_cp && cp->data_size() == region_size; ++cp) {
1081
++full_count;
1082
}
1083
1084
assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1085
const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1086
const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
1087
if (maximum_compaction || cp == end_cp || interval_ended) {
1088
_maximum_compaction_gc_num = total_invocations();
1089
return sd.region_to_addr(cp);
1090
}
1091
1092
HeapWord* const new_top = _space_info[id].new_top();
1093
const size_t space_live = pointer_delta(new_top, space->bottom());
1094
const size_t space_used = space->used_in_words();
1095
const size_t space_capacity = space->capacity_in_words();
1096
1097
const double cur_density = double(space_live) / space_capacity;
1098
const double deadwood_density =
1099
(1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density;
1100
const size_t deadwood_goal = size_t(space_capacity * deadwood_density);
1101
1102
log_develop_debug(gc, compaction)(
1103
"cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
1104
cur_density, deadwood_density, deadwood_goal);
1105
log_develop_debug(gc, compaction)(
1106
"space_live=" SIZE_FORMAT " space_used=" SIZE_FORMAT " "
1107
"space_cap=" SIZE_FORMAT,
1108
space_live, space_used,
1109
space_capacity);
1110
1111
// XXX - Use binary search?
1112
HeapWord* dense_prefix = sd.region_to_addr(cp);
1113
const RegionData* full_cp = cp;
1114
const RegionData* const top_cp = sd.addr_to_region_ptr(space->top() - 1);
1115
while (cp < end_cp) {
1116
HeapWord* region_destination = cp->destination();
1117
const size_t cur_deadwood = pointer_delta(dense_prefix, region_destination);
1118
1119
log_develop_trace(gc, compaction)(
1120
"c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " "
1121
"dp=" PTR_FORMAT " cdw=" SIZE_FORMAT_W(8),
1122
sd.region(cp), p2i(region_destination),
1123
p2i(dense_prefix), cur_deadwood);
1124
1125
if (cur_deadwood >= deadwood_goal) {
1126
// Found the region that has the correct amount of deadwood to the left.
1127
// This typically occurs after crossing a fairly sparse set of regions, so
1128
// iterate backwards over those sparse regions, looking for the region
1129
// that has the lowest density of live objects 'to the right.'
1130
size_t space_to_left = sd.region(cp) * region_size;
1131
size_t live_to_left = space_to_left - cur_deadwood;
1132
size_t space_to_right = space_capacity - space_to_left;
1133
size_t live_to_right = space_live - live_to_left;
1134
double density_to_right = double(live_to_right) / space_to_right;
1135
while (cp > full_cp) {
1136
--cp;
1137
const size_t prev_region_live_to_right = live_to_right -
1138
cp->data_size();
1139
const size_t prev_region_space_to_right = space_to_right + region_size;
1140
double prev_region_density_to_right =
1141
double(prev_region_live_to_right) / prev_region_space_to_right;
1142
if (density_to_right <= prev_region_density_to_right) {
1143
return dense_prefix;
1144
}
1145
1146
log_develop_trace(gc, compaction)(
1147
"backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f "
1148
"pc_d2r=%10.8f",
1149
sd.region(cp), density_to_right,
1150
prev_region_density_to_right);
1151
1152
dense_prefix -= region_size;
1153
live_to_right = prev_region_live_to_right;
1154
space_to_right = prev_region_space_to_right;
1155
density_to_right = prev_region_density_to_right;
1156
}
1157
return dense_prefix;
1158
}
1159
1160
dense_prefix += region_size;
1161
++cp;
1162
}
1163
1164
return dense_prefix;
1165
}
1166
1167
#ifndef PRODUCT
1168
void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
1169
const SpaceId id,
1170
const bool maximum_compaction,
1171
HeapWord* const addr)
1172
{
1173
const size_t region_idx = summary_data().addr_to_region_idx(addr);
1174
RegionData* const cp = summary_data().region(region_idx);
1175
const MutableSpace* const space = _space_info[id].space();
1176
HeapWord* const new_top = _space_info[id].new_top();
1177
1178
const size_t space_live = pointer_delta(new_top, space->bottom());
1179
const size_t dead_to_left = pointer_delta(addr, cp->destination());
1180
const size_t space_cap = space->capacity_in_words();
1181
const double dead_to_left_pct = double(dead_to_left) / space_cap;
1182
const size_t live_to_right = new_top - cp->destination();
1183
const size_t dead_to_right = space->top() - addr - live_to_right;
1184
1185
log_develop_debug(gc, compaction)(
1186
"%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " "
1187
"spl=" SIZE_FORMAT " "
1188
"d2l=" SIZE_FORMAT " d2l%%=%6.4f "
1189
"d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT " "
1190
"ratio=%10.8f",
1191
algorithm, p2i(addr), region_idx,
1192
space_live,
1193
dead_to_left, dead_to_left_pct,
1194
dead_to_right, live_to_right,
1195
double(dead_to_right) / live_to_right);
1196
}
1197
#endif // #ifndef PRODUCT
1198
1199
// Return a fraction indicating how much of the generation can be treated as
1200
// "dead wood" (i.e., not reclaimed). The function uses a normal distribution
1201
// based on the density of live objects in the generation to determine a limit,
1202
// which is then adjusted so the return value is min_percent when the density is
1203
// 1.
1204
//
1205
// The following table shows some return values for a different values of the
1206
// standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
1207
// min_percent is 1.
1208
//
1209
// fraction allowed as dead wood
1210
// -----------------------------------------------------------------
1211
// density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
1212
// ------- ---------- ---------- ---------- ---------- ---------- ----------
1213
// 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
1214
// 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
1215
// 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
1216
// 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
1217
// 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
1218
// 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
1219
// 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
1220
// 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
1221
// 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
1222
// 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
1223
// 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510
1224
// 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
1225
// 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
1226
// 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
1227
// 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
1228
// 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
1229
// 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
1230
// 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
1231
// 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
1232
// 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
1233
// 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
1234
1235
double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent)
1236
{
1237
assert(_dwl_initialized, "uninitialized");
1238
1239
// The raw limit is the value of the normal distribution at x = density.
1240
const double raw_limit = normal_distribution(density);
1241
1242
// Adjust the raw limit so it becomes the minimum when the density is 1.
1243
//
1244
// First subtract the adjustment value (which is simply the precomputed value
1245
// normal_distribution(1.0)); this yields a value of 0 when the density is 1.
1246
// Then add the minimum value, so the minimum is returned when the density is
1247
// 1. Finally, prevent negative values, which occur when the mean is not 0.5.
1248
const double min = double(min_percent) / 100.0;
1249
const double limit = raw_limit - _dwl_adjustment + min;
1250
return MAX2(limit, 0.0);
1251
}
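
// Illustrative sketch (standalone; not part of the original HotSpot sources):
// reproducing one entry of the table above, assuming normal_distribution(x) is
// the Gaussian density with mean _dwl_mean and standard deviation _dwl_std_dev
// as set up in initialize_dead_wood_limiter(). With mean 0.5, std_dev 0.80 and
// min_percent 1, the limiter at density 0.5 should land near 0.0985 (table row
// 0.50000, column std_dev=80).
#if 0
#include <cassert>
#include <cmath>

int main() {
  const double mean    = 0.50;   // ParallelOldDeadWoodLimiterMean / 100
  const double std_dev = 0.80;   // ParallelOldDeadWoodLimiterStdDev / 100
  const double first_term = 1.0 / (std::sqrt(2.0 * M_PI) * std_dev);

  auto normal_distribution = [&](double density) {
    const double d = density - mean;
    return first_term * std::exp(-d * d / (2.0 * std_dev * std_dev));
  };

  const double adjustment = normal_distribution(1.0);   // _dwl_adjustment
  const double min        = 1.0 / 100.0;                // min_percent == 1
  const double limit      = normal_distribution(0.5) - adjustment + min;
  assert(std::fabs(limit - 0.09847664) < 1e-4);
  return 0;
}
#endif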
1252
1253
ParallelCompactData::RegionData*
1254
PSParallelCompact::first_dead_space_region(const RegionData* beg,
1255
const RegionData* end)
1256
{
1257
const size_t region_size = ParallelCompactData::RegionSize;
1258
ParallelCompactData& sd = summary_data();
1259
size_t left = sd.region(beg);
1260
size_t right = end > beg ? sd.region(end) - 1 : left;
1261
1262
// Binary search.
1263
while (left < right) {
1264
// Equivalent to (left + right) / 2, but does not overflow.
1265
const size_t middle = left + (right - left) / 2;
1266
RegionData* const middle_ptr = sd.region(middle);
1267
HeapWord* const dest = middle_ptr->destination();
1268
HeapWord* const addr = sd.region_to_addr(middle);
1269
assert(dest != NULL, "sanity");
1270
assert(dest <= addr, "must move left");
1271
1272
if (middle > left && dest < addr) {
1273
right = middle - 1;
1274
} else if (middle < right && middle_ptr->data_size() == region_size) {
1275
left = middle + 1;
1276
} else {
1277
return middle_ptr;
1278
}
1279
}
1280
return sd.region(left);
1281
}
1282
1283
ParallelCompactData::RegionData*
1284
PSParallelCompact::dead_wood_limit_region(const RegionData* beg,
1285
const RegionData* end,
1286
size_t dead_words)
1287
{
1288
ParallelCompactData& sd = summary_data();
1289
size_t left = sd.region(beg);
1290
size_t right = end > beg ? sd.region(end) - 1 : left;
1291
1292
// Binary search.
1293
while (left < right) {
1294
// Equivalent to (left + right) / 2, but does not overflow.
1295
const size_t middle = left + (right - left) / 2;
1296
RegionData* const middle_ptr = sd.region(middle);
1297
HeapWord* const dest = middle_ptr->destination();
1298
HeapWord* const addr = sd.region_to_addr(middle);
1299
assert(dest != NULL, "sanity");
1300
assert(dest <= addr, "must move left");
1301
1302
const size_t dead_to_left = pointer_delta(addr, dest);
1303
if (middle > left && dead_to_left > dead_words) {
1304
right = middle - 1;
1305
} else if (middle < right && dead_to_left < dead_words) {
1306
left = middle + 1;
1307
} else {
1308
return middle_ptr;
1309
}
1310
}
1311
return sd.region(left);
1312
}
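
// Illustrative sketch (standalone; not part of the original HotSpot sources):
// the overflow-safe midpoint and bracketing style used by the two binary
// searches above, applied to a toy non-decreasing array standing in for the
// "dead words to the left" of each region. Values are invented.
#if 0
#include <cassert>
#include <cstddef>

int main() {
  const size_t dead_to_left[] = { 0, 0, 10, 40, 90, 160 };
  const size_t dead_words = 40;   // desired amount of dead space to the left

  size_t left = 0;
  size_t right = sizeof(dead_to_left) / sizeof(dead_to_left[0]) - 1;
  while (left < right) {
    const size_t middle = left + (right - left) / 2;   // never overflows
    if (middle > left && dead_to_left[middle] > dead_words) {
      right = middle - 1;
    } else if (middle < right && dead_to_left[middle] < dead_words) {
      left = middle + 1;
    } else {
      left = right = middle;   // the real code returns this region directly
    }
  }
  assert(left == 3 && dead_to_left[left] == dead_words);
  return 0;
}
#endif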
1313
1314
// The result is valid during the summary phase, after the initial summarization
1315
// of each space into itself, and before final summarization.
1316
inline double
1317
PSParallelCompact::reclaimed_ratio(const RegionData* const cp,
1318
HeapWord* const bottom,
1319
HeapWord* const top,
1320
HeapWord* const new_top)
1321
{
1322
ParallelCompactData& sd = summary_data();
1323
1324
assert(cp != NULL, "sanity");
1325
assert(bottom != NULL, "sanity");
1326
assert(top != NULL, "sanity");
1327
assert(new_top != NULL, "sanity");
1328
assert(top >= new_top, "summary data problem?");
1329
assert(new_top > bottom, "space is empty; should not be here");
1330
assert(new_top >= cp->destination(), "sanity");
1331
assert(top >= sd.region_to_addr(cp), "sanity");
1332
1333
HeapWord* const destination = cp->destination();
1334
const size_t dense_prefix_live = pointer_delta(destination, bottom);
1335
const size_t compacted_region_live = pointer_delta(new_top, destination);
1336
const size_t compacted_region_used = pointer_delta(top,
1337
sd.region_to_addr(cp));
1338
const size_t reclaimable = compacted_region_used - compacted_region_live;
1339
1340
const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
1341
return double(reclaimable) / divisor;
1342
}
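
// Illustrative sketch (standalone; not part of the original HotSpot sources):
// the reclaimed_ratio() arithmetic above with made-up word counts. A higher
// ratio means more dead space is recovered per word of live data that must be
// copied out of the compacted tail of the space.
#if 0
#include <cassert>
#include <cstddef>

int main() {
  const size_t dense_prefix_live     = 1000;  // live words left in place (dense prefix)
  const size_t compacted_region_live = 400;   // live words that must be moved
  const size_t compacted_region_used = 900;   // total words in the compacted tail
  const size_t reclaimable = compacted_region_used - compacted_region_live;  // 500

  const double divisor = dense_prefix_live + 1.25 * compacted_region_live;   // 1500
  const double ratio = double(reclaimable) / divisor;                        // ~0.333
  assert(ratio > 0.33 && ratio < 0.34);
  return 0;
}
#endif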
1343
1344
// Return the address of the end of the dense prefix, a.k.a. the start of the
1345
// compacted region. The address is always on a region boundary.
1346
//
1347
// Completely full regions at the left are skipped, since no compaction can
1348
// occur in those regions. Then the maximum amount of dead wood to allow is
1349
// computed, based on the density (amount live / capacity) of the generation;
1350
// the region with approximately that amount of dead space to the left is
1351
// identified as the limit region. Regions between the last completely full
1352
// region and the limit region are scanned and the one that has the best
1353
// (maximum) reclaimed_ratio() is selected.
1354
HeapWord*
1355
PSParallelCompact::compute_dense_prefix(const SpaceId id,
1356
bool maximum_compaction)
1357
{
1358
const size_t region_size = ParallelCompactData::RegionSize;
1359
const ParallelCompactData& sd = summary_data();
1360
1361
const MutableSpace* const space = _space_info[id].space();
1362
HeapWord* const top = space->top();
1363
HeapWord* const top_aligned_up = sd.region_align_up(top);
1364
HeapWord* const new_top = _space_info[id].new_top();
1365
HeapWord* const new_top_aligned_up = sd.region_align_up(new_top);
1366
HeapWord* const bottom = space->bottom();
1367
const RegionData* const beg_cp = sd.addr_to_region_ptr(bottom);
1368
const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
1369
const RegionData* const new_top_cp =
1370
sd.addr_to_region_ptr(new_top_aligned_up);
1371
1372
// Skip full regions at the beginning of the space--they are necessarily part
1373
// of the dense prefix.
1374
const RegionData* const full_cp = first_dead_space_region(beg_cp, new_top_cp);
1375
assert(full_cp->destination() == sd.region_to_addr(full_cp) ||
1376
space->is_empty(), "no dead space allowed to the left");
1377
assert(full_cp->data_size() < region_size || full_cp == new_top_cp - 1,
1378
"region must have dead space");
1379
1380
// The gc number is saved whenever a maximum compaction is done, and used to
// determine when the maximum compaction interval has expired. This avoids
// successive max compactions for different reasons.
assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1384
const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1385
const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval ||
1386
total_invocations() == HeapFirstMaximumCompactionCount;
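// For example, if HeapMaximumCompactionInterval is 20, the interval check
// alone forces a maximum compaction only once more than 20 full collections
// have run since the last one (illustrative reading of the flags used above).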
1387
if (maximum_compaction || full_cp == top_cp || interval_ended) {
1388
_maximum_compaction_gc_num = total_invocations();
1389
return sd.region_to_addr(full_cp);
1390
}
1391
1392
const size_t space_live = pointer_delta(new_top, bottom);
1393
const size_t space_used = space->used_in_words();
1394
const size_t space_capacity = space->capacity_in_words();
1395
1396
const double density = double(space_live) / double(space_capacity);
1397
const size_t min_percent_free = MarkSweepDeadRatio;
1398
const double limiter = dead_wood_limiter(density, min_percent_free);
1399
const size_t dead_wood_max = space_used - space_live;
1400
const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
1401
dead_wood_max);
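// Illustration with made-up numbers: space_capacity = 1M words,
// space_used = 1M, space_live = 800K and limiter = 0.05 give
// dead_wood_max = 200K and dead_wood_limit = MIN2(50K, 200K) = 50K words.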
1402
1403
log_develop_debug(gc, compaction)(
1404
"space_live=" SIZE_FORMAT " space_used=" SIZE_FORMAT " "
1405
"space_cap=" SIZE_FORMAT,
1406
space_live, space_used,
1407
space_capacity);
1408
log_develop_debug(gc, compaction)(
1409
"dead_wood_limiter(%6.4f, " SIZE_FORMAT ")=%6.4f "
1410
"dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
1411
density, min_percent_free, limiter,
1412
dead_wood_max, dead_wood_limit);
1413
1414
// Locate the region with the desired amount of dead space to the left.
1415
const RegionData* const limit_cp =
1416
dead_wood_limit_region(full_cp, top_cp, dead_wood_limit);
1417
1418
// Scan from the first region with dead space to the limit region and find the
// one with the best (largest) reclaimed ratio.
double best_ratio = 0.0;
1421
const RegionData* best_cp = full_cp;
1422
for (const RegionData* cp = full_cp; cp < limit_cp; ++cp) {
1423
double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
1424
if (tmp_ratio > best_ratio) {
1425
best_cp = cp;
1426
best_ratio = tmp_ratio;
1427
}
1428
}
1429
1430
return sd.region_to_addr(best_cp);
1431
}
1432
1433
void PSParallelCompact::summarize_spaces_quick()
1434
{
1435
for (unsigned int i = 0; i < last_space_id; ++i) {
1436
const MutableSpace* space = _space_info[i].space();
1437
HeapWord** nta = _space_info[i].new_top_addr();
1438
bool result = _summary_data.summarize(_space_info[i].split_info(),
1439
space->bottom(), space->top(), NULL,
1440
space->bottom(), space->end(), nta);
1441
assert(result, "space must fit into itself");
1442
_space_info[i].set_dense_prefix(space->bottom());
1443
}
1444
}
1445
1446
void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
1447
{
1448
HeapWord* const dense_prefix_end = dense_prefix(id);
1449
const RegionData* region = _summary_data.addr_to_region_ptr(dense_prefix_end);
1450
const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
1451
if (dead_space_crosses_boundary(region, dense_prefix_bit)) {
1452
// Only enough dead space is filled so that any remaining dead space to the
// left is larger than the minimum filler object. (The remainder is filled
// during the copy/update phase.)
//
// The size of the dead space to the right of the boundary is not a
// concern, since compaction will be able to use whatever space is
// available.
//
// Here '||' is the boundary, 'x' represents a don't care bit and a box
// surrounds the space to be filled with an object.
//
// In the 32-bit VM, each bit represents two 32-bit words:
// +---+
// a) beg_bits: ... x x x | 0 | || 0 x x ...
// end_bits: ... x x x | 0 | || 0 x x ...
// +---+
//
// In the 64-bit VM, each bit represents one 64-bit word:
// +------------+
// b) beg_bits: ... x x x | 0 || 0 | x x ...
// end_bits: ... x x 1 | 0 || 0 | x x ...
// +------------+
// +-------+
// c) beg_bits: ... x x | 0 0 | || 0 x x ...
// end_bits: ... x 1 | 0 0 | || 0 x x ...
// +-------+
// +-----------+
// d) beg_bits: ... x | 0 0 0 | || 0 x x ...
// end_bits: ... 1 | 0 0 0 | || 0 x x ...
// +-----------+
// +-------+
// e) beg_bits: ... 0 0 | 0 0 | || 0 x x ...
// end_bits: ... 0 0 | 0 0 | || 0 x x ...
// +-------+

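// Tying the cases to the code below: in case b the filler starts one word to
// the left of the boundary and crosses it; in case d a three-word filler ends
// exactly at the boundary; in the remaining cases a minimum-size filler ends
// at the boundary.
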
// Initially assume case a, c or e will apply.
1488
size_t obj_len = CollectedHeap::min_fill_size();
1489
HeapWord* obj_beg = dense_prefix_end - obj_len;
1490
1491
#ifdef _LP64
1492
if (MinObjAlignment > 1) { // object alignment > heap word size
1493
// Cases a, c or e.
1494
} else if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
1495
// Case b above.
1496
obj_beg = dense_prefix_end - 1;
1497
} else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
1498
_mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
1499
// Case d above.
1500
obj_beg = dense_prefix_end - 3;
1501
obj_len = 3;
1502
}
1503
#endif // #ifdef _LP64
1504
1505
CollectedHeap::fill_with_object(obj_beg, obj_len);
1506
_mark_bitmap.mark_obj(obj_beg, obj_len);
1507
_summary_data.add_obj(obj_beg, obj_len);
1508
assert(start_array(id) != NULL, "sanity");
1509
start_array(id)->allocate_block(obj_beg);
1510
}
1511
}
1512
1513
void
1514
PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
1515
{
1516
assert(id < last_space_id, "id out of range");
1517
assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom(),
1518
"should have been reset in summarize_spaces_quick()");
1519
1520
const MutableSpace* space = _space_info[id].space();
1521
if (_space_info[id].new_top() != space->bottom()) {
1522
HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
1523
_space_info[id].set_dense_prefix(dense_prefix_end);
1524
1525
#ifndef PRODUCT
1526
if (log_is_enabled(Debug, gc, compaction)) {
1527
print_dense_prefix_stats("ratio", id, maximum_compaction,
1528
dense_prefix_end);
1529
HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
1530
print_dense_prefix_stats("density", id, maximum_compaction, addr);
1531
}
1532
#endif // #ifndef PRODUCT
1533
1534
// Recompute the summary data, taking into account the dense prefix. If
1535
// every last byte will be reclaimed, then the existing summary data which
1536
// compacts everything can be left in place.
1537
if (!maximum_compaction && dense_prefix_end != space->bottom()) {
1538
// If dead space crosses the dense prefix boundary, it is (at least
1539
// partially) filled with a dummy object, marked live and added to the
1540
// summary data. This simplifies the copy/update phase and must be done
1541
// before the final locations of objects are determined, to prevent
1542
// leaving a fragment of dead space that is too small to fill.
1543
fill_dense_prefix_end(id);
1544
1545
// Compute the destination of each Region, and thus each object.
1546
_summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
1547
_summary_data.summarize(_space_info[id].split_info(),
1548
dense_prefix_end, space->top(), NULL,
1549
dense_prefix_end, space->end(),
1550
_space_info[id].new_top_addr());
1551
}
1552
}
1553
1554
if (log_develop_is_enabled(Trace, gc, compaction)) {
1555
const size_t region_size = ParallelCompactData::RegionSize;
1556
HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
1557
const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
1558
const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
1559
HeapWord* const new_top = _space_info[id].new_top();
1560
const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
1561
const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
1562
log_develop_trace(gc, compaction)(
1563
"id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
1564
"dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
1565
"cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
1566
id, space->capacity_in_words(), p2i(dense_prefix_end),
1567
dp_region, dp_words / region_size,
1568
cr_words / region_size, p2i(new_top));
1569
}
1570
}
1571
1572
#ifndef PRODUCT
1573
void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
1574
HeapWord* dst_beg, HeapWord* dst_end,
1575
SpaceId src_space_id,
1576
HeapWord* src_beg, HeapWord* src_end)
1577
{
1578
log_develop_trace(gc, compaction)(
1579
"Summarizing %d [%s] into %d [%s]: "
1580
"src=" PTR_FORMAT "-" PTR_FORMAT " "
1581
SIZE_FORMAT "-" SIZE_FORMAT " "
1582
"dst=" PTR_FORMAT "-" PTR_FORMAT " "
1583
SIZE_FORMAT "-" SIZE_FORMAT,
1584
src_space_id, space_names[src_space_id],
1585
dst_space_id, space_names[dst_space_id],
1586
p2i(src_beg), p2i(src_end),
1587
_summary_data.addr_to_region_idx(src_beg),
1588
_summary_data.addr_to_region_idx(src_end),
1589
p2i(dst_beg), p2i(dst_end),
1590
_summary_data.addr_to_region_idx(dst_beg),
1591
_summary_data.addr_to_region_idx(dst_end));
1592
}
1593
#endif // #ifndef PRODUCT
1594
1595
void PSParallelCompact::summary_phase(ParCompactionManager* cm,
1596
bool maximum_compaction)
1597
{
1598
GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);
1599
1600
// Quick summarization of each space into itself, to see how much is live.
1601
summarize_spaces_quick();
1602
1603
log_develop_trace(gc, compaction)("summary phase: after summarizing each space to self");
1604
NOT_PRODUCT(print_region_ranges());
1605
NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1606
1607
// The amount of live data that will end up in old space (assuming it fits).
1608
size_t old_space_total_live = 0;
1609
for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1610
old_space_total_live += pointer_delta(_space_info[id].new_top(),
1611
_space_info[id].space()->bottom());
1612
}
1613
1614
MutableSpace* const old_space = _space_info[old_space_id].space();
1615
const size_t old_capacity = old_space->capacity_in_words();
1616
if (old_space_total_live > old_capacity) {
1617
// XXX - should also try to expand
1618
maximum_compaction = true;
1619
}
1620
1621
// Old generations.
1622
summarize_space(old_space_id, maximum_compaction);
1623
1624
// Summarize the remaining spaces in the young gen. The initial target space
// is the old gen. If a space does not fit entirely into the target, then the
// remainder is compacted into the space itself and that space becomes the new
// target.
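// For example, if eden's live data does not fit in the remaining old gen
// space, the part that fits is summarized into the old gen, the rest of eden
// is compacted within eden, and eden then becomes the target for the survivor
// spaces.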
1628
SpaceId dst_space_id = old_space_id;
1629
HeapWord* dst_space_end = old_space->end();
1630
HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
1631
for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
1632
const MutableSpace* space = _space_info[id].space();
1633
const size_t live = pointer_delta(_space_info[id].new_top(),
1634
space->bottom());
1635
const size_t available = pointer_delta(dst_space_end, *new_top_addr);
1636
1637
NOT_PRODUCT(summary_phase_msg(dst_space_id, *new_top_addr, dst_space_end,
1638
SpaceId(id), space->bottom(), space->top());)
1639
if (live > 0 && live <= available) {
1640
// All the live data will fit.
1641
bool done = _summary_data.summarize(_space_info[id].split_info(),
1642
space->bottom(), space->top(),
1643
NULL,
1644
*new_top_addr, dst_space_end,
1645
new_top_addr);
1646
assert(done, "space must fit into old gen");
1647
1648
// Reset the new_top value for the space.
1649
_space_info[id].set_new_top(space->bottom());
1650
} else if (live > 0) {
1651
// Attempt to fit part of the source space into the target space.
1652
HeapWord* next_src_addr = NULL;
1653
bool done = _summary_data.summarize(_space_info[id].split_info(),
1654
space->bottom(), space->top(),
1655
&next_src_addr,
1656
*new_top_addr, dst_space_end,
1657
new_top_addr);
1658
assert(!done, "space should not fit into old gen");
1659
assert(next_src_addr != NULL, "sanity");
1660
1661
// The source space becomes the new target, so the remainder is compacted
1662
// within the space itself.
1663
dst_space_id = SpaceId(id);
1664
dst_space_end = space->end();
1665
new_top_addr = _space_info[id].new_top_addr();
1666
NOT_PRODUCT(summary_phase_msg(dst_space_id,
1667
space->bottom(), dst_space_end,
1668
SpaceId(id), next_src_addr, space->top());)
1669
done = _summary_data.summarize(_space_info[id].split_info(),
1670
next_src_addr, space->top(),
1671
NULL,
1672
space->bottom(), dst_space_end,
1673
new_top_addr);
1674
assert(done, "space must fit when compacted into itself");
1675
assert(*new_top_addr <= space->top(), "usage should not grow");
1676
}
1677
}
1678
1679
log_develop_trace(gc, compaction)("Summary_phase: after final summarization");
1680
NOT_PRODUCT(print_region_ranges());
1681
NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1682
}
1683
1684
// This method should contain all heap-specific policy for invoking a full
// collection. invoke_no_policy() will only attempt to compact the heap; it
// will do nothing further. If we need to bail out for policy reasons, scavenge
// before full gc, or any other specialized behavior, it needs to be added here.
//
// Note that this method should only be called from the vm_thread while at a
// safepoint.
//
// Note that the all_soft_refs_clear flag in the soft ref policy
// may be true because this method can be called without intervening
// activity. For example, when the heap space is tight and full measures
// are being taken to free space.
1696
void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1697
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1698
assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1699
"should be in vm thread");
1700
1701
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1702
GCCause::Cause gc_cause = heap->gc_cause();
1703
assert(!heap->is_gc_active(), "not reentrant");
1704
1705
PSAdaptiveSizePolicy* policy = heap->size_policy();
1706
IsGCActiveMark mark;
1707
1708
if (ScavengeBeforeFullGC) {
1709
PSScavenge::invoke_no_policy();
1710
}
1711
1712
const bool clear_all_soft_refs =
1713
heap->soft_ref_policy()->should_clear_all_soft_refs();
1714
1715
PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
1716
maximum_heap_compaction);
1717
}
1718
1719
// This method contains no policy. You should probably
1720
// be calling invoke() instead.
1721
bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
1722
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
1723
assert(ref_processor() != NULL, "Sanity");
1724
1725
if (GCLocker::check_active_before_gc()) {
1726
return false;
1727
}
1728
1729
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1730
1731
GCIdMark gc_id_mark;
1732
_gc_timer.register_gc_start();
1733
_gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
1734
1735
TimeStamp marking_start;
1736
TimeStamp compaction_start;
1737
TimeStamp collection_exit;
1738
1739
GCCause::Cause gc_cause = heap->gc_cause();
1740
PSYoungGen* young_gen = heap->young_gen();
1741
PSOldGen* old_gen = heap->old_gen();
1742
PSAdaptiveSizePolicy* size_policy = heap->size_policy();
1743
1744
// The scope of casr should end after code that can change
1745
// SoftRefPolicy::_should_clear_all_soft_refs.
1746
ClearedAllSoftRefs casr(maximum_heap_compaction,
1747
heap->soft_ref_policy());
1748
1749
if (ZapUnusedHeapArea) {
1750
// Save information needed to minimize mangling
1751
heap->record_gen_tops_before_GC();
1752
}
1753
1754
// Make sure data structures are sane, make the heap parsable, and do other
1755
// miscellaneous bookkeeping.
1756
pre_compact();
1757
1758
const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();
1759
1760
// Get the compaction manager reserved for the VM thread.
1761
ParCompactionManager* const vmthread_cm = ParCompactionManager::get_vmthread_cm();
1762
1763
{
1764
const uint active_workers =
1765
WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().total_workers(),
1766
ParallelScavengeHeap::heap()->workers().active_workers(),
1767
Threads::number_of_non_daemon_threads());
1768
ParallelScavengeHeap::heap()->workers().update_active_workers(active_workers);
1769
1770
GCTraceCPUTime tcpu;
1771
GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause, true);
1772
1773
heap->pre_full_gc_dump(&_gc_timer);
1774
1775
TraceCollectorStats tcs(counters());
1776
TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause);
1777
1778
if (log_is_enabled(Debug, gc, heap, exit)) {
1779
accumulated_time()->start();
1780
}
1781
1782
// Let the size policy know we're starting
1783
size_policy->major_collection_begin();
1784
1785
#if COMPILER2_OR_JVMCI
1786
DerivedPointerTable::clear();
1787
#endif
1788
1789
ref_processor()->enable_discovery();
1790
ref_processor()->setup_policy(maximum_heap_compaction);
1791
1792
bool marked_for_unloading = false;
1793
1794
marking_start.update();
1795
marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer);
1796
1797
bool max_on_system_gc = UseMaximumCompactionOnSystemGC
1798
&& GCCause::is_user_requested_gc(gc_cause);
1799
summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
1800
1801
#if COMPILER2_OR_JVMCI
1802
assert(DerivedPointerTable::is_active(), "Sanity");
1803
DerivedPointerTable::set_active(false);
1804
#endif
1805
1806
// adjust_roots() updates Universe::_intArrayKlassObj which is
1807
// needed by the compaction for filling holes in the dense prefix.
1808
adjust_roots();
1809
1810
compaction_start.update();
1811
compact();
1812
1813
ParCompactionManager::verify_all_region_stack_empty();
1814
1815
// Reset the mark bitmap, summary data, and do other bookkeeping. Must be
1816
// done before resizing.
1817
post_compact();
1818
1819
// Let the size policy know we're done
1820
size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
1821
1822
if (UseAdaptiveSizePolicy) {
1823
log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
1824
log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
1825
old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
1826
1827
// Don't check if the size_policy is ready here. Let
1828
// the size_policy check that internally.
1829
if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
1830
AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
1831
// Swap the survivor spaces if from_space is empty. The
1832
// resize_young_gen() called below is normally used after
1833
// a successful young GC and swapping of survivor spaces;
1834
// otherwise, it will fail to resize the young gen with
1835
// the current implementation.
1836
if (young_gen->from_space()->is_empty()) {
1837
young_gen->from_space()->clear(SpaceDecorator::Mangle);
1838
young_gen->swap_spaces();
1839
}
1840
1841
// Calculate optimal free space amounts
1842
assert(young_gen->max_gen_size() >
1843
young_gen->from_space()->capacity_in_bytes() +
1844
young_gen->to_space()->capacity_in_bytes(),
1845
"Sizes of space in young gen are out-of-bounds");
1846
1847
size_t young_live = young_gen->used_in_bytes();
1848
size_t eden_live = young_gen->eden_space()->used_in_bytes();
1849
size_t old_live = old_gen->used_in_bytes();
1850
size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
1851
size_t max_old_gen_size = old_gen->max_gen_size();
1852
size_t max_eden_size = young_gen->max_gen_size() -
1853
young_gen->from_space()->capacity_in_bytes() -
1854
young_gen->to_space()->capacity_in_bytes();
1855
1856
// Used for diagnostics
1857
size_policy->clear_generation_free_space_flags();
1858
1859
size_policy->compute_generations_free_space(young_live,
1860
eden_live,
1861
old_live,
1862
cur_eden,
1863
max_old_gen_size,
1864
max_eden_size,
1865
true /* full gc*/);
1866
1867
size_policy->check_gc_overhead_limit(eden_live,
1868
max_old_gen_size,
1869
max_eden_size,
1870
true /* full gc*/,
1871
gc_cause,
1872
heap->soft_ref_policy());
1873
1874
size_policy->decay_supplemental_growth(true /* full gc*/);
1875
1876
heap->resize_old_gen(
1877
size_policy->calculated_old_free_size_in_bytes());
1878
1879
heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
1880
size_policy->calculated_survivor_size_in_bytes());
1881
}
1882
1883
log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
1884
}
1885
1886
if (UsePerfData) {
1887
PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
1888
counters->update_counters();
1889
counters->update_old_capacity(old_gen->capacity_in_bytes());
1890
counters->update_young_capacity(young_gen->capacity_in_bytes());
1891
}
1892
1893
heap->resize_all_tlabs();
1894
1895
// Resize the metaspace capacity after a collection
1896
MetaspaceGC::compute_new_size();
1897
1898
if (log_is_enabled(Debug, gc, heap, exit)) {
1899
accumulated_time()->stop();
1900
}
1901
1902
heap->print_heap_change(pre_gc_values);
1903
1904
// Track memory usage and detect low memory
1905
MemoryService::track_memory_usage();
1906
heap->update_counters();
1907
1908
heap->post_full_gc_dump(&_gc_timer);
1909
}
1910
1911
if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
1912
Universe::verify("After GC");
1913
}
1914
1915
// Re-verify object start arrays
1916
if (VerifyObjectStartArray &&
1917
VerifyAfterGC) {
1918
old_gen->verify_object_start_array();
1919
}
1920
1921
if (ZapUnusedHeapArea) {
1922
old_gen->object_space()->check_mangled_unused_area_complete();
1923
}
1924
1925
NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
1926
1927
collection_exit.update();
1928
1929
heap->print_heap_after_gc();
1930
heap->trace_heap_after_gc(&_gc_tracer);
1931
1932
log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
1933
marking_start.ticks(), compaction_start.ticks(),
1934
collection_exit.ticks());
1935
1936
AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
1937
1938
_gc_timer.register_gc_end();
1939
1940
_gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
1941
_gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
1942
1943
return true;
1944
}
1945
1946
class PCAddThreadRootsMarkingTaskClosure : public ThreadClosure {
1947
private:
1948
uint _worker_id;
1949
1950
public:
1951
PCAddThreadRootsMarkingTaskClosure(uint worker_id) : _worker_id(worker_id) { }
1952
void do_thread(Thread* thread) {
1953
assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
1954
1955
ResourceMark rm;
1956
1957
ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(_worker_id);
1958
1959
PCMarkAndPushClosure mark_and_push_closure(cm);
1960
MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
1961
1962
thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
1963
1964
// Do the real work
1965
cm->follow_marking_stacks();
1966
}
1967
};
1968
1969
static void mark_from_roots_work(ParallelRootType::Value root_type, uint worker_id) {
1970
assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
1971
1972
ParCompactionManager* cm =
1973
ParCompactionManager::gc_thread_compaction_manager(worker_id);
1974
PCMarkAndPushClosure mark_and_push_closure(cm);
1975
1976
switch (root_type) {
1977
case ParallelRootType::class_loader_data:
1978
{
1979
CLDToOopClosure cld_closure(&mark_and_push_closure, ClassLoaderData::_claim_strong);
1980
ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);
1981
}
1982
break;
1983
1984
case ParallelRootType::code_cache:
1985
// Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
1986
//ScavengableNMethods::scavengable_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
1987
break;
1988
1989
case ParallelRootType::sentinel:
1990
DEBUG_ONLY(default:) // DEBUG_ONLY hack will create compile error on release builds (-Wswitch) and runtime check on debug builds
1991
fatal("Bad enumeration value: %u", root_type);
1992
break;
1993
}
1994
1995
// Do the real work
1996
cm->follow_marking_stacks();
1997
}
1998
1999
void steal_marking_work(TaskTerminator& terminator, uint worker_id) {
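// Work-stealing loop: repeatedly steal object-array chunks and whole objects
// from other workers' queues, draining this worker's own marking stacks after
// each successful steal, until the terminator reports that no work is left.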
2000
assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
2001
2002
ParCompactionManager* cm =
2003
ParCompactionManager::gc_thread_compaction_manager(worker_id);
2004
2005
oop obj = NULL;
2006
ObjArrayTask task;
2007
do {
2008
while (ParCompactionManager::steal_objarray(worker_id, task)) {
2009
cm->follow_array((objArrayOop)task.obj(), task.index());
2010
cm->follow_marking_stacks();
2011
}
2012
while (ParCompactionManager::steal(worker_id, obj)) {
2013
cm->follow_contents(obj);
2014
cm->follow_marking_stacks();
2015
}
2016
} while (!terminator.offer_termination());
2017
}
2018
2019
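// Gang task for the marking phase: each worker claims root-type subtasks,
// scans its share of Java threads, marks from the strong OopStorages, and
// finally steals marking work from other workers until termination.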
class MarkFromRootsTask : public AbstractGangTask {
2020
StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
2021
OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_set_par_state;
2022
SequentialSubTasksDone _subtasks;
2023
TaskTerminator _terminator;
2024
uint _active_workers;
2025
2026
public:
2027
MarkFromRootsTask(uint active_workers) :
2028
AbstractGangTask("MarkFromRootsTask"),
2029
_strong_roots_scope(active_workers),
2030
_subtasks(ParallelRootType::sentinel),
2031
_terminator(active_workers, ParCompactionManager::oop_task_queues()),
2032
_active_workers(active_workers) {
2033
}
2034
2035
virtual void work(uint worker_id) {
2036
for (uint task = 0; _subtasks.try_claim_task(task); /*empty*/ ) {
2037
mark_from_roots_work(static_cast<ParallelRootType::Value>(task), worker_id);
2038
}
2039
2040
PCAddThreadRootsMarkingTaskClosure closure(worker_id);
2041
Threads::possibly_parallel_threads_do(true /*parallel */, &closure);
2042
2043
// Mark from OopStorages
2044
{
2045
ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
2046
PCMarkAndPushClosure closure(cm);
2047
_oop_storage_set_par_state.oops_do(&closure);
2048
// Do the real work
2049
cm->follow_marking_stacks();
2050
}
2051
2052
if (_active_workers > 1) {
2053
steal_marking_work(_terminator, worker_id);
2054
}
2055
}
2056
};
2057
2058
class ParallelCompactRefProcProxyTask : public RefProcProxyTask {
2059
TaskTerminator _terminator;
2060
2061
public:
2062
ParallelCompactRefProcProxyTask(uint max_workers)
2063
: RefProcProxyTask("ParallelCompactRefProcProxyTask", max_workers),
2064
_terminator(_max_workers, ParCompactionManager::oop_task_queues()) {}
2065
2066
void work(uint worker_id) override {
2067
assert(worker_id < _max_workers, "sanity");
2068
ParCompactionManager* cm = (_tm == RefProcThreadModel::Single) ? ParCompactionManager::get_vmthread_cm() : ParCompactionManager::gc_thread_compaction_manager(worker_id);
2069
PCMarkAndPushClosure keep_alive(cm);
2070
ParCompactionManager::FollowStackClosure complete_gc(cm, (_tm == RefProcThreadModel::Single) ? nullptr : &_terminator, worker_id);
2071
_rp_task->rp_work(worker_id, PSParallelCompact::is_alive_closure(), &keep_alive, &complete_gc);
2072
}
2073
2074
void prepare_run_task_hook() override {
2075
_terminator.reset_for_reuse(_queue_count);
2076
}
2077
};
2078
2079
void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2080
bool maximum_heap_compaction,
2081
ParallelOldTracer *gc_tracer) {
2082
// Recursively traverse all live objects and mark them
2083
GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);
2084
2085
uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
2086
2087
// Need new claim bits before marking starts.
2088
ClassLoaderDataGraph::clear_claimed_marks();
2089
2090
{
2091
GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);
2092
2093
MarkFromRootsTask task(active_gc_threads);
2094
ParallelScavengeHeap::heap()->workers().run_task(&task);
2095
}
2096
2097
// Process reference objects found during marking
2098
{
2099
GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
2100
2101
ReferenceProcessorStats stats;
2102
ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());
2103
2104
ref_processor()->set_active_mt_degree(active_gc_threads);
2105
ParallelCompactRefProcProxyTask task(ref_processor()->max_num_queues());
2106
stats = ref_processor()->process_discovered_references(task, pt);
2107
2108
gc_tracer->report_gc_reference_stats(stats);
2109
pt.print_all_references();
2110
}
2111
2112
// This is the point where the entire marking should have completed.
2113
ParCompactionManager::verify_all_marking_stack_empty();
2114
2115
{
2116
GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
2117
WeakProcessor::weak_oops_do(is_alive_closure(), &do_nothing_cl);
2118
}
2119
2120
{
2121
GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);
2122
2123
// Follow system dictionary roots and unload classes.
2124
bool purged_class = SystemDictionary::do_unloading(&_gc_timer);
2125
2126
// Unload nmethods.
2127
CodeCache::do_unloading(is_alive_closure(), purged_class);
2128
2129
// Prune dead klasses from subklass/sibling/implementor lists.
2130
Klass::clean_weak_klass_links(purged_class);
2131
2132
// Clean JVMCI metadata handles.
2133
JVMCI_ONLY(JVMCI::do_unloading(purged_class));
2134
}
2135
2136
_gc_tracer.report_object_count_after_gc(is_alive_closure());
2137
}
2138
2139
#ifdef ASSERT
2140
void PCAdjustPointerClosure::verify_cm(ParCompactionManager* cm) {
2141
assert(cm != NULL, "associate ParCompactionManage should not be NULL");
2142
auto vmthread_cm = ParCompactionManager::get_vmthread_cm();
2143
if (Thread::current()->is_VM_thread()) {
2144
assert(cm == vmthread_cm, "VM threads should use ParCompactionManager from get_vmthread_cm()");
2145
} else {
2146
assert(Thread::current()->is_GC_task_thread(), "Must be a GC thread");
2147
assert(cm != vmthread_cm, "GC threads should use ParCompactionManager from gc_thread_compaction_manager()");
2148
}
2149
}
2150
#endif
2151
2152
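// Gang task for the adjust phase: every worker adjusts its share of thread
// stacks, OopStorages, CLDs and weak handles, while the code cache and the
// two reference processors are each claimed exactly once via SubTasksDone.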
class PSAdjustTask final : public AbstractGangTask {
2153
SubTasksDone _sub_tasks;
2154
WeakProcessor::Task _weak_proc_task;
2155
OopStorageSetStrongParState<false, false> _oop_storage_iter;
2156
uint _nworkers;
2157
2158
enum PSAdjustSubTask {
2159
PSAdjustSubTask_code_cache,
2160
PSAdjustSubTask_old_ref_process,
2161
PSAdjustSubTask_young_ref_process,
2162
2163
PSAdjustSubTask_num_elements
2164
};
2165
2166
public:
2167
PSAdjustTask(uint nworkers) :
2168
AbstractGangTask("PSAdjust task"),
2169
_sub_tasks(PSAdjustSubTask_num_elements),
2170
_weak_proc_task(nworkers),
2171
_nworkers(nworkers) {
2172
// Need new claim bits when tracing through and adjusting pointers.
2173
ClassLoaderDataGraph::clear_claimed_marks();
2174
if (nworkers > 1) {
2175
Threads::change_thread_claim_token();
2176
}
2177
}
2178
2179
~PSAdjustTask() {
2180
Threads::assert_all_threads_claimed();
2181
}
2182
2183
void work(uint worker_id) {
2184
ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
2185
PCAdjustPointerClosure adjust(cm);
2186
{
2187
ResourceMark rm;
2188
Threads::possibly_parallel_oops_do(_nworkers > 1, &adjust, nullptr);
2189
}
2190
_oop_storage_iter.oops_do(&adjust);
2191
{
2192
CLDToOopClosure cld_closure(&adjust, ClassLoaderData::_claim_strong);
2193
ClassLoaderDataGraph::cld_do(&cld_closure);
2194
}
2195
{
2196
AlwaysTrueClosure always_alive;
2197
_weak_proc_task.work(worker_id, &always_alive, &adjust);
2198
}
2199
if (_sub_tasks.try_claim_task(PSAdjustSubTask_code_cache)) {
2200
CodeBlobToOopClosure adjust_code(&adjust, CodeBlobToOopClosure::FixRelocations);
2201
CodeCache::blobs_do(&adjust_code);
2202
}
2203
if (_sub_tasks.try_claim_task(PSAdjustSubTask_old_ref_process)) {
2204
PSParallelCompact::ref_processor()->weak_oops_do(&adjust);
2205
}
2206
if (_sub_tasks.try_claim_task(PSAdjustSubTask_young_ref_process)) {
2207
// Roots were visited so references into the young gen in roots
2208
// may have been scanned. Process them also.
2209
// Should the reference processor have a span that excludes
2210
// young gen objects?
2211
PSScavenge::reference_processor()->weak_oops_do(&adjust);
2212
}
2213
_sub_tasks.all_tasks_claimed();
2214
}
2215
};
2216
2217
void PSParallelCompact::adjust_roots() {
2218
// Adjust the pointers to reflect the new locations
2219
GCTraceTime(Info, gc, phases) tm("Adjust Roots", &_gc_timer);
2220
uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
2221
PSAdjustTask task(nworkers);
2222
ParallelScavengeHeap::heap()->workers().run_task(&task);
2223
}
2224
2225
// Helper class to print 8 region numbers per line and then print the total at the end.
2226
class FillableRegionLogger : public StackObj {
2227
private:
2228
Log(gc, compaction) log;
2229
static const int LineLength = 8;
2230
size_t _regions[LineLength];
2231
int _next_index;
2232
bool _enabled;
2233
size_t _total_regions;
2234
public:
2235
FillableRegionLogger() : _next_index(0), _enabled(log_develop_is_enabled(Trace, gc, compaction)), _total_regions(0) { }
2236
~FillableRegionLogger() {
2237
log.trace(SIZE_FORMAT " initially fillable regions", _total_regions);
2238
}
2239
2240
void print_line() {
2241
if (!_enabled || _next_index == 0) {
2242
return;
2243
}
2244
FormatBuffer<> line("Fillable: ");
2245
for (int i = 0; i < _next_index; i++) {
2246
line.append(" " SIZE_FORMAT_W(7), _regions[i]);
2247
}
2248
log.trace("%s", line.buffer());
2249
_next_index = 0;
2250
}
2251
2252
void handle(size_t region) {
2253
if (!_enabled) {
2254
return;
2255
}
2256
_regions[_next_index++] = region;
2257
if (_next_index == LineLength) {
2258
print_line();
2259
}
2260
_total_regions++;
2261
}
2262
};
2263
2264
void PSParallelCompact::prepare_region_draining_tasks(uint parallel_gc_threads)
2265
{
2266
GCTraceTime(Trace, gc, phases) tm("Drain Task Setup", &_gc_timer);
2267
2268
// Find the threads that are active
2269
uint worker_id = 0;
2270
2271
// Find all regions that are available (can be filled immediately) and
2272
// distribute them to the thread stacks. The iteration is done in reverse
2273
// order (high to low) so the regions will be removed in ascending order.
2274
2275
const ParallelCompactData& sd = PSParallelCompact::summary_data();
2276
2277
// id + 1 is used in the termination test so that an unsigned index can be
// used to count down to old_space_id == 0 (the test fails only after id
// wraps around past zero).
2279
FillableRegionLogger region_logger;
2280
for (unsigned int id = to_space_id; id + 1 > old_space_id; --id) {
2281
SpaceInfo* const space_info = _space_info + id;
2282
MutableSpace* const space = space_info->space();
2283
HeapWord* const new_top = space_info->new_top();
2284
2285
const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
2286
const size_t end_region =
2287
sd.addr_to_region_idx(sd.region_align_up(new_top));
2288
2289
for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
2290
if (sd.region(cur)->claim_unsafe()) {
2291
ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
2292
bool result = sd.region(cur)->mark_normal();
2293
assert(result, "Must succeed at this point.");
2294
cm->region_stack()->push(cur);
2295
region_logger.handle(cur);
2296
// Assign regions to tasks in round-robin fashion.
2297
if (++worker_id == parallel_gc_threads) {
2298
worker_id = 0;
2299
}
2300
}
2301
}
2302
region_logger.print_line();
2303
}
2304
}
2305
2306
class TaskQueue : StackObj {
2307
volatile uint _counter;
2308
uint _size;
2309
uint _insert_index;
2310
PSParallelCompact::UpdateDensePrefixTask* _backing_array;
2311
public:
2312
explicit TaskQueue(uint size) : _counter(0), _size(size), _insert_index(0), _backing_array(NULL) {
2313
_backing_array = NEW_C_HEAP_ARRAY(PSParallelCompact::UpdateDensePrefixTask, _size, mtGC);
2314
}
2315
~TaskQueue() {
2316
assert(_counter >= _insert_index, "not all queue elements were claimed");
2317
FREE_C_HEAP_ARRAY(T, _backing_array);
2318
}
2319
2320
void push(const PSParallelCompact::UpdateDensePrefixTask& value) {
2321
assert(_insert_index < _size, "too small backing array");
2322
_backing_array[_insert_index++] = value;
2323
}
2324
2325
bool try_claim(PSParallelCompact::UpdateDensePrefixTask& reference) {
2326
uint claimed = Atomic::fetch_and_add(&_counter, 1u);
2327
if (claimed < _insert_index) {
2328
reference = _backing_array[claimed];
2329
return true;
2330
} else {
2331
return false;
2332
}
2333
}
2334
};
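// Usage sketch (illustrative only, not part of the VM): the queue is filled
// single-threaded and then drained concurrently, e.g.
//   TaskQueue q(max_tasks);
//   q.push(PSParallelCompact::UpdateDensePrefixTask(old_space_id, beg, end));
//   PSParallelCompact::UpdateDensePrefixTask t;
//   while (q.try_claim(t)) { /* process t; safe from multiple GC workers */ }
// The Atomic::fetch_and_add on _counter hands out each pushed slot exactly once.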
2335
2336
#define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
2337
2338
void PSParallelCompact::enqueue_dense_prefix_tasks(TaskQueue& task_queue,
2339
uint parallel_gc_threads) {
2340
GCTraceTime(Trace, gc, phases) tm("Dense Prefix Task Setup", &_gc_timer);
2341
2342
ParallelCompactData& sd = PSParallelCompact::summary_data();
2343
2344
// Iterate over all the spaces adding tasks for updating
2345
// regions in the dense prefix. Assume that 1 gc thread
2346
// will work on opening the gaps and the remaining gc threads
2347
// will work on the dense prefix.
2348
unsigned int space_id;
2349
for (space_id = old_space_id; space_id < last_space_id; ++ space_id) {
2350
HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
2351
const MutableSpace* const space = _space_info[space_id].space();
2352
2353
if (dense_prefix_end == space->bottom()) {
2354
// There is no dense prefix for this space.
2355
continue;
2356
}
2357
2358
// The dense prefix is before this region.
2359
size_t region_index_end_dense_prefix =
2360
sd.addr_to_region_idx(dense_prefix_end);
2361
RegionData* const dense_prefix_cp =
2362
sd.region(region_index_end_dense_prefix);
2363
assert(dense_prefix_end == space->end() ||
2364
dense_prefix_cp->available() ||
2365
dense_prefix_cp->claimed(),
2366
"The region after the dense prefix should always be ready to fill");
2367
2368
size_t region_index_start = sd.addr_to_region_idx(space->bottom());
2369
2370
// Is there dense prefix work?
2371
size_t total_dense_prefix_regions =
2372
region_index_end_dense_prefix - region_index_start;
2373
// How many regions of the dense prefix should be given to
2374
// each thread?
2375
if (total_dense_prefix_regions > 0) {
2376
uint tasks_for_dense_prefix = 1;
2377
if (total_dense_prefix_regions <=
2378
(parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
2379
// Don't over partition. This assumes that
2380
// PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
2381
// so there are not many regions to process.
2382
tasks_for_dense_prefix = parallel_gc_threads;
2383
} else {
2384
// Over partition
2385
tasks_for_dense_prefix = parallel_gc_threads *
2386
PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
2387
}
2388
size_t regions_per_thread = total_dense_prefix_regions /
2389
tasks_for_dense_prefix;
2390
// Give each thread at least 1 region.
2391
if (regions_per_thread == 0) {
2392
regions_per_thread = 1;
2393
}
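// Worked example (illustrative): with 10 dense prefix regions and 4 GC
// threads, tasks_for_dense_prefix is 4 and regions_per_thread is 2, so the
// loop below pushes four two-region tasks and the leftover two regions are
// pushed as a final task after the loop.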
2394
2395
for (uint k = 0; k < tasks_for_dense_prefix; k++) {
2396
if (region_index_start >= region_index_end_dense_prefix) {
2397
break;
2398
}
2399
// region_index_end is not processed
2400
size_t region_index_end = MIN2(region_index_start + regions_per_thread,
2401
region_index_end_dense_prefix);
2402
task_queue.push(UpdateDensePrefixTask(SpaceId(space_id),
2403
region_index_start,
2404
region_index_end));
2405
region_index_start = region_index_end;
2406
}
2407
}
2408
// This gets any part of the dense prefix that did not
2409
// fit evenly.
2410
if (region_index_start < region_index_end_dense_prefix) {
2411
task_queue.push(UpdateDensePrefixTask(SpaceId(space_id),
2412
region_index_start,
2413
region_index_end_dense_prefix));
2414
}
2415
}
2416
}
2417
2418
#ifdef ASSERT
2419
// Write a histogram of the number of times the block table was filled for a
2420
// region.
2421
void PSParallelCompact::write_block_fill_histogram()
2422
{
2423
if (!log_develop_is_enabled(Trace, gc, compaction)) {
2424
return;
2425
}
2426
2427
Log(gc, compaction) log;
2428
ResourceMark rm;
2429
LogStream ls(log.trace());
2430
outputStream* out = &ls;
2431
2432
typedef ParallelCompactData::RegionData rd_t;
2433
ParallelCompactData& sd = summary_data();
2434
2435
for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2436
MutableSpace* const spc = _space_info[id].space();
2437
if (spc->bottom() != spc->top()) {
2438
const rd_t* const beg = sd.addr_to_region_ptr(spc->bottom());
2439
HeapWord* const top_aligned_up = sd.region_align_up(spc->top());
2440
const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
2441
2442
size_t histo[5] = { 0, 0, 0, 0, 0 };
2443
const size_t histo_len = sizeof(histo) / sizeof(size_t);
2444
const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
2445
2446
for (const rd_t* cur = beg; cur < end; ++cur) {
2447
++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
2448
}
2449
out->print("Block fill histogram: %u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
2450
for (size_t i = 0; i < histo_len; ++i) {
2451
out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
2452
histo[i], 100.0 * histo[i] / region_cnt);
2453
}
2454
out->cr();
2455
}
2456
}
2457
}
2458
#endif // #ifdef ASSERT
2459
2460
static void compaction_with_stealing_work(TaskTerminator* terminator, uint worker_id) {
2461
assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
2462
2463
ParCompactionManager* cm =
2464
ParCompactionManager::gc_thread_compaction_manager(worker_id);
2465
2466
// Drain the stacks that have been preloaded with regions
2467
// that are ready to fill.
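// After the preloaded regions are drained, the worker loops: it steals
// regions pushed by other workers, or claims an unavailable region and fills
// it through a shadow region, until offer_termination() succeeds.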
2468
2469
cm->drain_region_stacks();
2470
2471
guarantee(cm->region_stack()->is_empty(), "Not empty");
2472
2473
size_t region_index = 0;
2474
2475
while (true) {
2476
if (ParCompactionManager::steal(worker_id, region_index)) {
2477
PSParallelCompact::fill_and_update_region(cm, region_index);
2478
cm->drain_region_stacks();
2479
} else if (PSParallelCompact::steal_unavailable_region(cm, region_index)) {
2480
// Fill and update an unavailable region with the help of a shadow region
2481
PSParallelCompact::fill_and_update_shadow_region(cm, region_index);
2482
cm->drain_region_stacks();
2483
} else {
2484
if (terminator->offer_termination()) {
2485
break;
2486
}
2487
// Go around again.
2488
}
2489
}
2490
}
2491
2492
class UpdateDensePrefixAndCompactionTask: public AbstractGangTask {
2493
TaskQueue& _tq;
2494
TaskTerminator _terminator;
2495
uint _active_workers;
2496
2497
public:
2498
UpdateDensePrefixAndCompactionTask(TaskQueue& tq, uint active_workers) :
2499
AbstractGangTask("UpdateDensePrefixAndCompactionTask"),
2500
_tq(tq),
2501
_terminator(active_workers, ParCompactionManager::region_task_queues()),
2502
_active_workers(active_workers) {
2503
}
2504
virtual void work(uint worker_id) {
2505
ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
2506
2507
for (PSParallelCompact::UpdateDensePrefixTask task; _tq.try_claim(task); /* empty */) {
2508
PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
2509
task._space_id,
2510
task._region_index_start,
2511
task._region_index_end);
2512
}
2513
2514
// Once a thread has drained its stack, it should try to steal regions from
// other threads.
2516
compaction_with_stealing_work(&_terminator, worker_id);
2517
}
2518
};
2519
2520
void PSParallelCompact::compact() {
2521
GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);
2522
2523
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2524
PSOldGen* old_gen = heap->old_gen();
2525
old_gen->start_array()->reset();
2526
uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
2527
2528
// for [0..last_space_id)
2529
// for [0..active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)
2530
// push
2531
// push
2532
//
2533
// max push count is thus: last_space_id * (active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING + 1)
2534
TaskQueue task_queue(last_space_id * (active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING + 1));
2535
initialize_shadow_regions(active_gc_threads);
2536
prepare_region_draining_tasks(active_gc_threads);
2537
enqueue_dense_prefix_tasks(task_queue, active_gc_threads);
2538
2539
{
2540
GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);
2541
2542
UpdateDensePrefixAndCompactionTask task(task_queue, active_gc_threads);
2543
ParallelScavengeHeap::heap()->workers().run_task(&task);
2544
2545
#ifdef ASSERT
2546
// Verify that all regions have been processed before the deferred updates.
2547
for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2548
verify_complete(SpaceId(id));
2549
}
2550
#endif
2551
}
2552
2553
{
2554
GCTraceTime(Trace, gc, phases) tm("Deferred Updates", &_gc_timer);
2555
// Update the deferred objects, if any. In principle, any compaction
// manager can be used. However, since the current thread is the VM thread, we
// use the rightful one to keep the verification logic happy.
2558
ParCompactionManager* cm = ParCompactionManager::get_vmthread_cm();
2559
for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2560
update_deferred_objects(cm, SpaceId(id));
2561
}
2562
}
2563
2564
DEBUG_ONLY(write_block_fill_histogram());
2565
}
2566
2567
#ifdef ASSERT
2568
void PSParallelCompact::verify_complete(SpaceId space_id) {
2569
// All Regions between space bottom() to new_top() should be marked as filled
2570
// and all Regions between new_top() and top() should be available (i.e.,
2571
// should have been emptied).
2572
ParallelCompactData& sd = summary_data();
2573
SpaceInfo si = _space_info[space_id];
2574
HeapWord* new_top_addr = sd.region_align_up(si.new_top());
2575
HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
2576
const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
2577
const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
2578
const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
2579
2580
bool issued_a_warning = false;
2581
2582
size_t cur_region;
2583
for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
2584
const RegionData* const c = sd.region(cur_region);
2585
if (!c->completed()) {
2586
log_warning(gc)("region " SIZE_FORMAT " not filled: destination_count=%u",
2587
cur_region, c->destination_count());
2588
issued_a_warning = true;
2589
}
2590
}
2591
2592
for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
2593
const RegionData* const c = sd.region(cur_region);
2594
if (!c->available()) {
2595
log_warning(gc)("region " SIZE_FORMAT " not empty: destination_count=%u",
2596
cur_region, c->destination_count());
2597
issued_a_warning = true;
2598
}
2599
}
2600
2601
if (issued_a_warning) {
2602
print_region_ranges();
2603
}
2604
}
2605
#endif // #ifdef ASSERT
2606
2607
inline void UpdateOnlyClosure::do_addr(HeapWord* addr) {
2608
_start_array->allocate_block(addr);
2609
compaction_manager()->update_contents(cast_to_oop(addr));
2610
}
2611
2612
// Update interior oops in the ranges of regions [beg_region, end_region).
2613
void
2614
PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
2615
SpaceId space_id,
2616
size_t beg_region,
2617
size_t end_region) {
2618
ParallelCompactData& sd = summary_data();
2619
ParMarkBitMap* const mbm = mark_bitmap();
2620
2621
HeapWord* beg_addr = sd.region_to_addr(beg_region);
2622
HeapWord* const end_addr = sd.region_to_addr(end_region);
2623
assert(beg_region <= end_region, "bad region range");
2624
assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");
2625
2626
#ifdef ASSERT
2627
// Claim the regions to avoid triggering an assert when they are marked as
2628
// filled.
2629
for (size_t claim_region = beg_region; claim_region < end_region; ++claim_region) {
2630
assert(sd.region(claim_region)->claim_unsafe(), "claim() failed");
2631
}
2632
#endif // #ifdef ASSERT
2633
2634
if (beg_addr != space(space_id)->bottom()) {
2635
// Find the first live object or block of dead space that *starts* in this
2636
// range of regions. If a partial object crosses onto the region, skip it;
2637
// it will be marked for 'deferred update' when the object head is
2638
// processed. If dead space crosses onto the region, it is also skipped; it
2639
// will be filled when the prior region is processed. If neither of those
2640
// apply, the first word in the region is the start of a live object or dead
2641
// space.
2642
assert(beg_addr > space(space_id)->bottom(), "sanity");
2643
const RegionData* const cp = sd.region(beg_region);
2644
if (cp->partial_obj_size() != 0) {
2645
beg_addr = sd.partial_obj_end(beg_region);
2646
} else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
2647
beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
2648
}
2649
}
2650
2651
if (beg_addr < end_addr) {
2652
// A live object or block of dead space starts in this range of Regions.
2653
HeapWord* const dense_prefix_end = dense_prefix(space_id);
2654
2655
// Create closures and iterate.
2656
UpdateOnlyClosure update_closure(mbm, cm, space_id);
2657
FillClosure fill_closure(cm, space_id);
2658
ParMarkBitMap::IterationStatus status;
2659
status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
2660
dense_prefix_end);
2661
if (status == ParMarkBitMap::incomplete) {
2662
update_closure.do_addr(update_closure.source());
2663
}
2664
}
2665
2666
// Mark the regions as filled.
2667
RegionData* const beg_cp = sd.region(beg_region);
2668
RegionData* const end_cp = sd.region(end_region);
2669
for (RegionData* cp = beg_cp; cp < end_cp; ++cp) {
2670
cp->set_completed();
2671
}
2672
}
2673
2674
// Return the SpaceId for the space containing addr. If addr is not in the
2675
// heap, last_space_id is returned. In debug mode it expects the address to be
2676
// in the heap and asserts such.
2677
PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
2678
assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");
2679
2680
for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2681
if (_space_info[id].space()->contains(addr)) {
2682
return SpaceId(id);
2683
}
2684
}
2685
2686
assert(false, "no space contains the addr");
2687
return last_space_id;
2688
}
2689
2690
void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
2691
SpaceId id) {
2692
assert(id < last_space_id, "bad space id");
2693
2694
ParallelCompactData& sd = summary_data();
2695
const SpaceInfo* const space_info = _space_info + id;
2696
ObjectStartArray* const start_array = space_info->start_array();
2697
2698
const MutableSpace* const space = space_info->space();
2699
assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
2700
HeapWord* const beg_addr = space_info->dense_prefix();
2701
HeapWord* const end_addr = sd.region_align_up(space_info->new_top());
2702
2703
const RegionData* const beg_region = sd.addr_to_region_ptr(beg_addr);
2704
const RegionData* const end_region = sd.addr_to_region_ptr(end_addr);
2705
const RegionData* cur_region;
2706
for (cur_region = beg_region; cur_region < end_region; ++cur_region) {
2707
HeapWord* const addr = cur_region->deferred_obj_addr();
2708
if (addr != NULL) {
2709
if (start_array != NULL) {
2710
start_array->allocate_block(addr);
2711
}
2712
cm->update_contents(cast_to_oop(addr));
2713
assert(oopDesc::is_oop_or_null(cast_to_oop(addr)), "Expected an oop or NULL at " PTR_FORMAT, p2i(cast_to_oop(addr)));
2714
}
2715
}
2716
}
2717
2718
// Skip over count live words starting from beg, and return the address of the
// next live word. Unless marked, the word corresponding to beg is assumed to
// be dead. Callers must either ensure beg does not correspond to the middle of
// an object, or account for those live words in some other way. Callers must
// also ensure that there are enough live words in the range [beg, end) to skip.
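// Worked example (assuming one bitmap bit per heap word): if [beg, end)
// starts with 3 dead words followed by a 5-word object, then
// skip_live_words(beg, end, 2) returns beg + 5, i.e. the address two words
// into that object.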
2723
HeapWord*
2724
PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
2725
{
2726
assert(count > 0, "sanity");
2727
2728
ParMarkBitMap* m = mark_bitmap();
2729
idx_t bits_to_skip = m->words_to_bits(count);
2730
idx_t cur_beg = m->addr_to_bit(beg);
2731
const idx_t search_end = m->align_range_end(m->addr_to_bit(end));
2732
2733
do {
2734
cur_beg = m->find_obj_beg(cur_beg, search_end);
2735
idx_t cur_end = m->find_obj_end(cur_beg, search_end);
2736
const size_t obj_bits = cur_end - cur_beg + 1;
2737
if (obj_bits > bits_to_skip) {
2738
return m->bit_to_addr(cur_beg + bits_to_skip);
2739
}
2740
bits_to_skip -= obj_bits;
2741
cur_beg = cur_end + 1;
2742
} while (bits_to_skip > 0);
2743
2744
// Skipping the desired number of words landed just past the end of an object.
2745
// Find the start of the next object.
2746
cur_beg = m->find_obj_beg(cur_beg, search_end);
2747
assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip");
2748
return m->bit_to_addr(cur_beg);
2749
}
2750
2751
HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
2752
SpaceId src_space_id,
2753
size_t src_region_idx)
2754
{
2755
assert(summary_data().is_region_aligned(dest_addr), "not aligned");
2756
2757
const SplitInfo& split_info = _space_info[src_space_id].split_info();
2758
if (split_info.dest_region_addr() == dest_addr) {
2759
// The partial object ending at the split point contains the first word to
2760
// be copied to dest_addr.
2761
return split_info.first_src_addr();
2762
}
2763
2764
const ParallelCompactData& sd = summary_data();
2765
ParMarkBitMap* const bitmap = mark_bitmap();
2766
const size_t RegionSize = ParallelCompactData::RegionSize;
2767
2768
assert(sd.is_region_aligned(dest_addr), "not aligned");
2769
const RegionData* const src_region_ptr = sd.region(src_region_idx);
2770
const size_t partial_obj_size = src_region_ptr->partial_obj_size();
2771
HeapWord* const src_region_destination = src_region_ptr->destination();
2772
2773
assert(dest_addr >= src_region_destination, "wrong src region");
2774
assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
2775
2776
HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
2777
HeapWord* const src_region_end = src_region_beg + RegionSize;
2778
2779
HeapWord* addr = src_region_beg;
2780
if (dest_addr == src_region_destination) {
2781
// Return the first live word in the source region.
2782
if (partial_obj_size == 0) {
2783
addr = bitmap->find_obj_beg(addr, src_region_end);
2784
assert(addr < src_region_end, "no objects start in src region");
2785
}
2786
return addr;
2787
}
2788
2789
// Must skip some live data.
2790
size_t words_to_skip = dest_addr - src_region_destination;
2791
assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");
2792
2793
if (partial_obj_size >= words_to_skip) {
2794
// All the live words to skip are part of the partial object.
2795
addr += words_to_skip;
2796
if (partial_obj_size == words_to_skip) {
2797
// Find the first live word past the partial object.
2798
addr = bitmap->find_obj_beg(addr, src_region_end);
2799
assert(addr < src_region_end, "wrong src region");
2800
}
2801
return addr;
2802
}
2803
2804
// Skip over the partial object (if any).
2805
if (partial_obj_size != 0) {
2806
words_to_skip -= partial_obj_size;
2807
addr += partial_obj_size;
2808
}
2809
2810
// Skip over live words due to objects that start in the region.
2811
addr = skip_live_words(addr, src_region_end, words_to_skip);
2812
assert(addr < src_region_end, "wrong src region");
2813
return addr;
2814
}
2815
2816
void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
2817
SpaceId src_space_id,
2818
size_t beg_region,
2819
HeapWord* end_addr)
2820
{
2821
ParallelCompactData& sd = summary_data();
2822
2823
#ifdef ASSERT
2824
MutableSpace* const src_space = _space_info[src_space_id].space();
2825
HeapWord* const beg_addr = sd.region_to_addr(beg_region);
2826
assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
2827
"src_space_id does not match beg_addr");
2828
assert(src_space->contains(end_addr) || end_addr == src_space->end(),
2829
"src_space_id does not match end_addr");
2830
#endif // #ifdef ASSERT
2831
2832
RegionData* const beg = sd.region(beg_region);
2833
RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));
2834
2835
// Regions up to new_top() are enqueued if they become available.
2836
HeapWord* const new_top = _space_info[src_space_id].new_top();
2837
RegionData* const enqueue_end =
2838
sd.addr_to_region_ptr(sd.region_align_up(new_top));
2839
2840
for (RegionData* cur = beg; cur < end; ++cur) {
2841
assert(cur->data_size() > 0, "region must have live data");
2842
cur->decrement_destination_count();
2843
if (cur < enqueue_end && cur->available() && cur->claim()) {
2844
if (cur->mark_normal()) {
2845
cm->push_region(sd.region(cur));
2846
} else if (cur->mark_copied()) {
2847
// Try to copy the content of the shadow region back to its corresponding
// heap region if the shadow region is filled. Otherwise, the GC thread that
// fills the shadow region will copy the data back (see
// MoveAndUpdateShadowClosure::complete_region).
2851
copy_back(sd.region_to_addr(cur->shadow_region()), sd.region_to_addr(cur));
2852
ParCompactionManager::push_shadow_region_mt_safe(cur->shadow_region());
2853
cur->set_completed();
2854
}
2855
}
2856
}
2857
}
2858
2859
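// Find the next source region to draw from: skip any empty regions below
// src_space_top in the current space and, if none remain, advance to the
// first non-empty region of the next space that does not compact into itself.
// src_space_id, src_space_top and the closure's source address are updated to
// match the region that is returned.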
size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
                                          SpaceId& src_space_id,
                                          HeapWord*& src_space_top,
                                          HeapWord* end_addr)
{
  typedef ParallelCompactData::RegionData RegionData;

  ParallelCompactData& sd = PSParallelCompact::summary_data();
  const size_t region_size = ParallelCompactData::RegionSize;

  size_t src_region_idx = 0;

  // Skip empty regions (if any) up to the top of the space.
  HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
  RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
  HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
  const RegionData* const top_region_ptr =
    sd.addr_to_region_ptr(top_aligned_up);
  while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
    ++src_region_ptr;
  }

  if (src_region_ptr < top_region_ptr) {
    // The next source region is in the current space. Update src_region_idx
    // and the source address to match src_region_ptr.
    src_region_idx = sd.region(src_region_ptr);
    HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
    if (src_region_addr > closure.source()) {
      closure.set_source(src_region_addr);
    }
    return src_region_idx;
  }

  // Switch to a new source space and find the first non-empty region.
  unsigned int space_id = src_space_id + 1;
  assert(space_id < last_space_id, "not enough spaces");

  HeapWord* const destination = closure.destination();

  do {
    MutableSpace* space = _space_info[space_id].space();
    HeapWord* const bottom = space->bottom();
    const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);

    // Iterate over the spaces that do not compact into themselves.
    if (bottom_cp->destination() != bottom) {
      HeapWord* const top_aligned_up = sd.region_align_up(space->top());
      const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);

      for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
        if (src_cp->live_obj_size() > 0) {
          // Found it.
          assert(src_cp->destination() == destination,
                 "first live obj in the space must match the destination");
          assert(src_cp->partial_obj_size() == 0,
                 "a space cannot begin with a partial obj");

          src_space_id = SpaceId(space_id);
          src_space_top = space->top();
          const size_t src_region_idx = sd.region(src_cp);
          closure.set_source(sd.region_to_addr(src_region_idx));
          return src_region_idx;
        } else {
          assert(src_cp->data_size() == 0, "sanity");
        }
      }
    }
  } while (++space_id < last_space_id);

  assert(false, "no source region was found");
  return 0;
}

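// Fill the destination region given by region_idx by iterating over the live
// words of its source regions with the supplied closure, moving on to further
// source regions (and source spaces) until the destination region is full.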
void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
{
  typedef ParMarkBitMap::IterationStatus IterationStatus;
  ParMarkBitMap* const bitmap = mark_bitmap();
  ParallelCompactData& sd = summary_data();
  RegionData* const region_ptr = sd.region(region_idx);

  // Get the source region and related info.
  size_t src_region_idx = region_ptr->source_region();
  SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
  HeapWord* src_space_top = _space_info[src_space_id].space()->top();
  HeapWord* dest_addr = sd.region_to_addr(region_idx);

  closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));

  // Adjust src_region_idx to prepare for decrementing destination counts (the
  // destination count is not decremented when a region is copied to itself).
  if (src_region_idx == region_idx) {
    src_region_idx += 1;
  }

  if (bitmap->is_unmarked(closure.source())) {
    // The first source word is in the middle of an object; copy the remainder
    // of the object or as much as will fit. The fact that pointer updates were
    // deferred will be noted when the object header is processed.
    HeapWord* const old_src_addr = closure.source();
    closure.copy_partial_obj();
    if (closure.is_full()) {
      decrement_destination_counts(cm, src_space_id, src_region_idx,
                                   closure.source());
      region_ptr->set_deferred_obj_addr(NULL);
      closure.complete_region(cm, dest_addr, region_ptr);
      return;
    }

    HeapWord* const end_addr = sd.region_align_down(closure.source());
    if (sd.region_align_down(old_src_addr) != end_addr) {
      // The partial object was copied from more than one source region.
      decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);

      // Move to the next source region, possibly switching spaces as well. All
      // args except end_addr may be modified.
      src_region_idx = next_src_region(closure, src_space_id, src_space_top,
                                       end_addr);
    }
  }

  do {
    HeapWord* const cur_addr = closure.source();
    HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
                                    src_space_top);
    IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);

    if (status == ParMarkBitMap::incomplete) {
      // The last obj that starts in the source region does not end in the
      // region.
      assert(closure.source() < end_addr, "sanity");
      HeapWord* const obj_beg = closure.source();
      HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
                                       src_space_top);
      HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
      if (obj_end < range_end) {
        // The end was found; the entire object will fit.
        status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
        assert(status != ParMarkBitMap::would_overflow, "sanity");
      } else {
        // The end was not found; the object will not fit.
        assert(range_end < src_space_top, "obj cannot cross space boundary");
        status = ParMarkBitMap::would_overflow;
      }
    }

    if (status == ParMarkBitMap::would_overflow) {
      // The last object did not fit. Note that interior oop updates were
      // deferred, then copy enough of the object to fill the region.
      region_ptr->set_deferred_obj_addr(closure.destination());
      status = closure.copy_until_full(); // copies from closure.source()

      decrement_destination_counts(cm, src_space_id, src_region_idx,
                                   closure.source());
      closure.complete_region(cm, dest_addr, region_ptr);
      return;
    }

    if (status == ParMarkBitMap::full) {
      decrement_destination_counts(cm, src_space_id, src_region_idx,
                                   closure.source());
      region_ptr->set_deferred_obj_addr(NULL);
      closure.complete_region(cm, dest_addr, region_ptr);
      return;
    }

    decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);

    // Move to the next source region, possibly switching spaces as well. All
    // args except end_addr may be modified.
    src_region_idx = next_src_region(closure, src_space_id, src_space_top,
                                     end_addr);
  } while (true);
}

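// Fill a heap region directly (no shadow region) using a plain
// MoveAndUpdateClosure.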
void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
{
  MoveAndUpdateClosure cl(mark_bitmap(), cm, region_idx);
  fill_region(cm, cl, region_idx);
}

void PSParallelCompact::fill_and_update_shadow_region(ParCompactionManager* cm, size_t region_idx)
{
  // Get a shadow region first
  ParallelCompactData& sd = summary_data();
  RegionData* const region_ptr = sd.region(region_idx);
  size_t shadow_region = ParCompactionManager::pop_shadow_region_mt_safe(region_ptr);
  // The InvalidShadow return value indicates the corresponding heap region is available,
  // so use MoveAndUpdateClosure to fill the normal region. Otherwise, use
  // MoveAndUpdateShadowClosure to fill the acquired shadow region.
  if (shadow_region == ParCompactionManager::InvalidShadow) {
    MoveAndUpdateClosure cl(mark_bitmap(), cm, region_idx);
    region_ptr->shadow_to_normal();
    return fill_region(cm, cl, region_idx);
  } else {
    MoveAndUpdateShadowClosure cl(mark_bitmap(), cm, region_idx, shadow_region);
    return fill_region(cm, cl, region_idx);
  }
}

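// Copy one region's worth of words from a filled shadow region back to the
// heap region it stands in for.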
void PSParallelCompact::copy_back(HeapWord *shadow_addr, HeapWord *region_addr)
{
  Copy::aligned_conjoint_words(shadow_addr, region_addr, _summary_data.RegionSize);
}

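// Starting from this worker's next_shadow_region cursor, scan old-space
// regions below new_top() (striding by the number of active workers) for one
// that can be marked for shadow processing. Returns true and sets region_idx
// if such a region is claimed.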
bool PSParallelCompact::steal_unavailable_region(ParCompactionManager* cm, size_t &region_idx)
{
  size_t next = cm->next_shadow_region();
  ParallelCompactData& sd = summary_data();
  size_t old_new_top = sd.addr_to_region_idx(_space_info[old_space_id].new_top());
  uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();

  while (next < old_new_top) {
    if (sd.region(next)->mark_shadow()) {
      region_idx = next;
      return true;
    }
    next = cm->move_next_shadow_region_by(active_gc_threads);
  }

  return false;
}

// The shadow region is an optimization to address region dependencies in full GC. The basic
// idea is making more regions available by temporarily storing their live objects in empty
// shadow regions to resolve dependencies between them and the destination regions. Therefore,
// GC threads need not wait for destination regions to be available before processing sources.
//
// A typical workflow would be:
// After draining its own stack and failing to steal from others, a GC worker would pick an
// unavailable region (destination count > 0) and get a shadow region. Then the worker fills
// the shadow region by copying live objects from source regions of the unavailable one. Once
// the unavailable region becomes available, the data in the shadow region will be copied back.
// Shadow regions are empty regions in the to-space and regions between top and end of other spaces.
//
// For more details, please refer to Section 4.2 of the VEE'19 paper:
// Haoyu Li, Mingyu Wu, Binyu Zang, and Haibo Chen. 2019. ScissorGC: scalable and efficient
// compaction for Java full garbage collection. In Proceedings of the 15th ACM SIGPLAN/SIGOPS
// International Conference on Virtual Execution Environments (VEE 2019). ACM, New York, NY, USA,
// 108-121. DOI: https://doi.org/10.1145/3313808.3313820
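// Make every region that lies entirely above both new_top() and top() of a
// space available as a shadow region, and give each GC worker a distinct
// starting cursor for steal_unavailable_region().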
void PSParallelCompact::initialize_shadow_regions(uint parallel_gc_threads)
{
  const ParallelCompactData& sd = PSParallelCompact::summary_data();

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    SpaceInfo* const space_info = _space_info + id;
    MutableSpace* const space = space_info->space();

    const size_t beg_region =
      sd.addr_to_region_idx(sd.region_align_up(MAX2(space_info->new_top(), space->top())));
    const size_t end_region =
      sd.addr_to_region_idx(sd.region_align_down(space->end()));

    for (size_t cur = beg_region; cur < end_region; ++cur) {
      ParCompactionManager::push_shadow_region(cur);
    }
  }

  size_t beg_region = sd.addr_to_region_idx(_space_info[old_space_id].dense_prefix());
  for (uint i = 0; i < parallel_gc_threads; i++) {
    ParCompactionManager *cm = ParCompactionManager::gc_thread_compaction_manager(i);
    cm->set_next_shadow_region(beg_region + i);
  }
}

void PSParallelCompact::fill_blocks(size_t region_idx)
{
  // Fill in the block table elements for the specified region. Each block
  // table element holds the number of live words in the region that are to the
  // left of the first object that starts in the block. Thus only blocks in
  // which an object starts need to be filled.
  //
  // The algorithm scans the section of the bitmap that corresponds to the
  // region, keeping a running total of the live words. When an object start is
  // found, if it's the first to start in the block that contains it, the
  // current total is written to the block table element.
  const size_t Log2BlockSize = ParallelCompactData::Log2BlockSize;
  const size_t Log2RegionSize = ParallelCompactData::Log2RegionSize;
  const size_t RegionSize = ParallelCompactData::RegionSize;

  ParallelCompactData& sd = summary_data();
  const size_t partial_obj_size = sd.region(region_idx)->partial_obj_size();
  if (partial_obj_size >= RegionSize) {
    return; // No objects start in this region.
  }

  // Ensure the first loop iteration decides that the block has changed.
  size_t cur_block = sd.block_count();

  const ParMarkBitMap* const bitmap = mark_bitmap();

  const size_t Log2BitsPerBlock = Log2BlockSize - LogMinObjAlignment;
  assert((size_t)1 << Log2BitsPerBlock ==
         bitmap->words_to_bits(ParallelCompactData::BlockSize), "sanity");

  size_t beg_bit = bitmap->words_to_bits(region_idx << Log2RegionSize);
  const size_t range_end = beg_bit + bitmap->words_to_bits(RegionSize);
  size_t live_bits = bitmap->words_to_bits(partial_obj_size);
  beg_bit = bitmap->find_obj_beg(beg_bit + live_bits, range_end);
  while (beg_bit < range_end) {
    const size_t new_block = beg_bit >> Log2BitsPerBlock;
    if (new_block != cur_block) {
      cur_block = new_block;
      sd.block(cur_block)->set_offset(bitmap->bits_to_words(live_bits));
    }

    const size_t end_bit = bitmap->find_obj_end(beg_bit, range_end);
    if (end_bit < range_end - 1) {
      live_bits += end_bit - beg_bit + 1;
      beg_bit = bitmap->find_obj_beg(end_bit + 1, range_end);
    } else {
      return;
    }
  }
}

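// Copy enough words from source() to fill the remaining space in the
// destination region and report the region as full.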
ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
{
  if (source() != copy_destination()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    Copy::aligned_conjoint_words(source(), copy_destination(), words_remaining());
  }
  update_state(words_remaining());
  assert(is_full(), "sanity");
  return ParMarkBitMap::full;
}

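// Copy the tail of an object that starts in a preceding region, limited both
// by the end of the object and by the space remaining in the destination.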
void MoveAndUpdateClosure::copy_partial_obj()
{
  size_t words = words_remaining();

  HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
  HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
  if (end_addr < range_end) {
    words = bitmap()->obj_size(source(), end_addr);
  }

  // This test is necessary; if omitted, the pointer updates to a partial object
  // that crosses the dense prefix boundary could be overwritten.
  if (source() != copy_destination()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    Copy::aligned_conjoint_words(source(), copy_destination(), words);
  }
  update_state(words);
}

void MoveAndUpdateClosure::complete_region(ParCompactionManager *cm, HeapWord *dest_addr,
                                           PSParallelCompact::RegionData *region_ptr) {
  assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
  region_ptr->set_completed();
}

ParMarkBitMapClosure::IterationStatus
MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
  assert(destination() != NULL, "sanity");
  assert(bitmap()->obj_size(addr) == words, "bad size");

  _source = addr;
  assert(PSParallelCompact::summary_data().calc_new_pointer(source(), compaction_manager()) ==
         destination(), "wrong destination");

  if (words > words_remaining()) {
    return ParMarkBitMap::would_overflow;
  }

  // The start_array must be updated even if the object is not moving.
  if (_start_array != NULL) {
    _start_array->allocate_block(destination());
  }

  if (copy_destination() != source()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    Copy::aligned_conjoint_words(source(), copy_destination(), words);
  }

  oop moved_oop = cast_to_oop(copy_destination());
  compaction_manager()->update_contents(moved_oop);
  assert(oopDesc::is_oop_or_null(moved_oop), "Expected an oop or NULL at " PTR_FORMAT, p2i(moved_oop));

  update_state(words);
  assert(copy_destination() == cast_from_oop<HeapWord*>(moved_oop) + moved_oop->size(), "sanity");
  return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
}

void MoveAndUpdateShadowClosure::complete_region(ParCompactionManager *cm, HeapWord *dest_addr,
                                                 PSParallelCompact::RegionData *region_ptr) {
  assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
  // Record the shadow region index
  region_ptr->set_shadow_region(_shadow);
  // Mark the shadow region as filled to indicate the data is ready to be
  // copied back
  region_ptr->mark_filled();
  // Try to copy the content of the shadow region back to its corresponding
  // heap region if available; the GC thread that decreases the destination
  // count to zero will do the copying otherwise (see
  // PSParallelCompact::decrement_destination_counts).
  if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
    region_ptr->set_completed();
    PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
    ParCompactionManager::push_shadow_region_mt_safe(_shadow);
  }
}

UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
                                     ParCompactionManager* cm,
                                     PSParallelCompact::SpaceId space_id) :
  ParMarkBitMapClosure(mbm, cm),
  _space_id(space_id),
  _start_array(PSParallelCompact::start_array(space_id))
{
}

// Updates the references in the object to their new values.
ParMarkBitMapClosure::IterationStatus
UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
  do_addr(addr);
  return ParMarkBitMap::incomplete;
}

FillClosure::FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
  ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
  _start_array(PSParallelCompact::start_array(space_id))
{
  assert(space_id == PSParallelCompact::old_space_id,
         "cannot use FillClosure in the young gen");
}

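// Overwrite the dead range [addr, addr + size) with filler objects and record
// each filler in the old gen's start array.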
ParMarkBitMapClosure::IterationStatus
FillClosure::do_addr(HeapWord* addr, size_t size) {
  CollectedHeap::fill_with_objects(addr, size);
  HeapWord* const end = addr + size;
  do {
    _start_array->allocate_block(addr);
    addr += cast_to_oop(addr)->size();
  } while (addr < end);
  return ParMarkBitMap::incomplete;
}
