GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/gc/parallel/psCompactionManager.cpp
/*
 * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"

PSOldGen* ParCompactionManager::_old_gen = NULL;
ParCompactionManager** ParCompactionManager::_manager_array = NULL;

ParCompactionManager::OopTaskQueueSet* ParCompactionManager::_oop_task_queues = NULL;
ParCompactionManager::ObjArrayTaskQueueSet* ParCompactionManager::_objarray_task_queues = NULL;
ParCompactionManager::RegionTaskQueueSet* ParCompactionManager::_region_task_queues = NULL;

ObjectStartArray* ParCompactionManager::_start_array = NULL;
ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL;
GrowableArray<size_t >* ParCompactionManager::_shadow_region_array = NULL;
Monitor* ParCompactionManager::_shadow_region_monitor = NULL;

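// Per-worker state for the parallel full GC (PSParallelCompact): each manager
// owns a marking stack, an ObjArray chunking stack and a region stack. The
// constructor also caches the old gen and its ObjectStartArray in the shared
// static fields.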
ParCompactionManager::ParCompactionManager() {

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  _old_gen = heap->old_gen();
  _start_array = old_gen()->start_array();

  marking_stack()->initialize();
  _objarray_stack.initialize();
  _region_stack.initialize();

  reset_bitmap_query_cache();
}

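// One-time setup: creates one manager per GC worker thread plus a VMThread-only
// one in the last slot of the array, registers the per-worker queues with the
// shared task-queue sets used for work stealing, and allocates the shadow
// region array and its monitor.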
void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
  assert(ParallelScavengeHeap::heap() != NULL,
         "Needed for initialization");

  _mark_bitmap = mbm;

  uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().total_workers();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1, mtGC);

  _oop_task_queues = new OopTaskQueueSet(parallel_gc_threads);
  _objarray_task_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
  _region_task_queues = new RegionTaskQueueSet(parallel_gc_threads);

  // Create and register the ParCompactionManager(s) for the worker threads.
  for(uint i=0; i<parallel_gc_threads; i++) {
    _manager_array[i] = new ParCompactionManager();
    oop_task_queues()->register_queue(i, _manager_array[i]->marking_stack());
    _objarray_task_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
    region_task_queues()->register_queue(i, _manager_array[i]->region_stack());
  }

  // The VMThread gets its own ParCompactionManager, which is not available
  // for work stealing.
  _manager_array[parallel_gc_threads] = new ParCompactionManager();
  assert(ParallelScavengeHeap::heap()->workers().total_workers() != 0,
         "Not initialized?");

  _shadow_region_array = new (ResourceObj::C_HEAP, mtGC) GrowableArray<size_t >(10, mtGC);

  _shadow_region_monitor = new Monitor(Mutex::barrier, "CompactionManager monitor",
                                       Mutex::_allow_vm_block_flag, Monitor::_safepoint_check_never);
}

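// Reset the cached bitmap query state on every manager; the loop bound is
// inclusive so the VMThread's manager is covered as well.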
void ParCompactionManager::reset_all_bitmap_query_caches() {
  uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().total_workers();
  for (uint i=0; i<=parallel_gc_threads; i++) {
    _manager_array[i]->reset_bitmap_query_cache();
  }
}

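// Return the manager assigned to the GC worker thread with the given index.
// The VMThread's manager, stored in the last slot, is not reachable through
// this accessor (see the range assert).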
ParCompactionManager*
ParCompactionManager::gc_thread_compaction_manager(uint index) {
  assert(index < ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[index];
}

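// Transitively follow (mark) everything reachable from this manager's marking
// and ObjArray stacks until both are empty.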
void ParCompactionManager::follow_marking_stacks() {
  do {
    // Drain the overflow stack first, to allow stealing from the marking stack.
    oop obj;
    while (marking_stack()->pop_overflow(obj)) {
      follow_contents(obj);
    }
    while (marking_stack()->pop_local(obj)) {
      follow_contents(obj);
    }

    // Process ObjArrays one at a time to avoid marking stack bloat.
    ObjArrayTask task;
    if (_objarray_stack.pop_overflow(task) || _objarray_stack.pop_local(task)) {
      follow_array((objArrayOop)task.obj(), task.index());
    }
  } while (!marking_stacks_empty());

  assert(marking_stacks_empty(), "Sanity");
}

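// Fill and update all compaction regions queued on this manager's region
// stack, draining the overflow portion first so idle workers can still steal.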
void ParCompactionManager::drain_region_stacks() {
  do {
    // Drain overflow stack first so other threads can steal.
    size_t region_index;
    while (region_stack()->pop_overflow(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }

    while (region_stack()->pop_local(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }
  } while (!region_stack()->is_empty());
}

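// Pop a free shadow region for the compaction of region_ptr. Blocks until a
// shadow region becomes available, or returns InvalidShadow once the
// corresponding heap region is available and no shadow copy is needed.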
size_t ParCompactionManager::pop_shadow_region_mt_safe(PSParallelCompact::RegionData* region_ptr) {
  MonitorLocker ml(_shadow_region_monitor, Mutex::_no_safepoint_check_flag);
  while (true) {
    if (!_shadow_region_array->is_empty()) {
      return _shadow_region_array->pop();
    }
    // Check if the corresponding heap region is available now.
    // If so, we don't need to get a shadow region anymore, and
    // we return InvalidShadow to indicate such a case.
    if (region_ptr->claimed()) {
      return InvalidShadow;
    }
    ml.wait(1);
  }
}

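// Shadow region bookkeeping. The _mt_safe push takes the monitor and wakes a
// waiter in pop_shadow_region_mt_safe(); the plain push and remove_all below
// take no lock, so they are presumably only used where concurrent access is
// not possible.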
void ParCompactionManager::push_shadow_region_mt_safe(size_t shadow_region) {
  MonitorLocker ml(_shadow_region_monitor, Mutex::_no_safepoint_check_flag);
  _shadow_region_array->push(shadow_region);
  ml.notify();
}

void ParCompactionManager::push_shadow_region(size_t shadow_region) {
  _shadow_region_array->push(shadow_region);
}

void ParCompactionManager::remove_all_shadow_regions() {
  _shadow_region_array->clear();
}

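// Debug-only sanity checks that every manager's stacks, including the
// VMThread's, have been fully drained.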
#ifdef ASSERT
void ParCompactionManager::verify_all_marking_stack_empty() {
  uint parallel_gc_threads = ParallelGCThreads;
  for (uint i = 0; i <= parallel_gc_threads; i++) {
    assert(_manager_array[i]->marking_stacks_empty(), "Marking stack should be empty");
  }
}

void ParCompactionManager::verify_all_region_stack_empty() {
  uint parallel_gc_threads = ParallelGCThreads;
  for (uint i = 0; i <= parallel_gc_threads; i++) {
    assert(_manager_array[i]->region_stack()->is_empty(), "Region stack should be empty");
  }
}
#endif