GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/gc/serial/defNewGeneration.hpp
/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SERIAL_DEFNEWGENERATION_HPP
#define SHARE_GC_SERIAL_DEFNEWGENERATION_HPP

#include "gc/serial/cSpaceCounters.hpp"
#include "gc/shared/ageTable.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/generationCounters.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "utilities/align.hpp"
#include "utilities/stack.hpp"

class ContiguousSpace;
class CSpaceCounters;
class DefNewYoungerGenClosure;
class DefNewScanClosure;
class ScanWeakRefClosure;
class SerialHeap;
class STWGCTimer;

// DefNewGeneration is a young generation containing eden, from- and
// to-space.
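//
// Conceptually the generation is laid out as three contiguous spaces:
//
//   +------------------+----------+----------+
//   |       eden       |   from   |    to    |
//   +------------------+----------+----------+
//
// New objects are allocated in eden. During a scavenge, live objects in
// eden and from-space are copied into to-space (or promoted to the old
// generation); afterwards from-space and to-space swap roles (see
// swap_spaces() below).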

class DefNewGeneration: public Generation {
  friend class VMStructs;

protected:
  Generation* _old_gen;
  uint        _tenuring_threshold;   // Tenuring threshold for next collection.
  AgeTable    _age_table;
  // Size of object to pretenure in words; command line provides bytes
  size_t      _pretenure_size_threshold_words;

  AgeTable*   age_table() { return &_age_table; }
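
  // The AgeTable records, for each object age, how many bytes currently sit
  // at that age in the survivor space; adjust_desired_tenuring_threshold()
  // uses it (together with MaxTenuringThreshold and the target survivor
  // occupancy) to pick the _tenuring_threshold for the next collection.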

  // Initialize state to optimistically assume no promotion failure will
  // happen.
  void init_assuming_no_promotion_failure();
  // True iff a promotion has failed in the current collection.
  bool _promotion_failed;
  bool promotion_failed() { return _promotion_failed; }
  PromotionFailedInfo _promotion_failed_info;

  // Handling promotion failure. A young generation collection
  // can fail if a live object cannot be copied out of its
  // location in eden or from-space during the collection. If
  // a collection fails, the young generation is left in a
  // consistent state such that it can be collected by a
  // full collection.
  //   Before the collection
  //     Objects are in eden or from-space
  //     All roots into the young generation point into eden or from-space.
  //
  //   After a failed collection
  //     Objects may be in eden, from-space, or to-space
  //     An object A in eden or from-space may have a copy B
  //       in to-space. If B exists, all roots that once pointed
  //       to A must now point to B.
  //     All objects in the young generation are unmarked.
  //     Eden, from-space, and to-space will all be collected by
  //       the full collection.
  void handle_promotion_failure(oop);

  // In the absence of promotion failure, we wouldn't look at "from-space"
  // objects after a young-gen collection. When promotion fails, however,
  // the subsequent full collection will look at from-space objects:
  // therefore we must remove their forwarding pointers.
  void remove_forwarding_pointers();

  virtual void restore_preserved_marks();

  // Preserved marks
  PreservedMarksSet _preserved_marks_set;

  // Promotion failure handling
  OopIterateClosure* _promo_failure_scan_stack_closure;
  void set_promo_failure_scan_stack_closure(OopIterateClosure* scan_stack_closure) {
    _promo_failure_scan_stack_closure = scan_stack_closure;
  }

  Stack<oop, mtGC> _promo_failure_scan_stack;
  void drain_promo_failure_scan_stack(void);
  bool _promo_failure_drain_in_progress;

  // Performance Counters
  GenerationCounters* _gen_counters;
  CSpaceCounters*     _eden_counters;
  CSpaceCounters*     _from_counters;
  CSpaceCounters*     _to_counters;

  // Sizing information
  size_t _max_eden_size;
  size_t _max_survivor_size;

  // Allocation support
  bool _should_allocate_from_space;
  bool should_allocate_from_space() const {
    return _should_allocate_from_space;
  }
  void clear_should_allocate_from_space() {
    _should_allocate_from_space = false;
  }
  void set_should_allocate_from_space() {
    _should_allocate_from_space = true;
  }

  // Tenuring
  void adjust_desired_tenuring_threshold();

  // Spaces
  ContiguousSpace* _eden_space;
  ContiguousSpace* _from_space;
  ContiguousSpace* _to_space;

  STWGCTimer* _gc_timer;

  enum SomeProtectedConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain.
    MinFreeScratchWords = 100
  };

  // Return the size of a survivor space if this generation were of size
  // gen_size.
  size_t compute_survivor_size(size_t gen_size, size_t alignment) const {
    size_t n = gen_size / (SurvivorRatio + 2);
    return n > alignment ? align_down(n, alignment) : alignment;
  }
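  // For example, with the default SurvivorRatio of 8 the generation is split
  // eden : from : to = 8 : 1 : 1, so a 10M young generation yields survivor
  // spaces of roughly 1M each (rounded down to the space alignment, but never
  // below one alignment unit).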

public:  // was "protected" but caused compile error on win32
  class IsAliveClosure: public BoolObjectClosure {
    Generation* _young_gen;
  public:
    IsAliveClosure(Generation* young_gen);
    bool do_object_b(oop p);
  };

  class KeepAliveClosure: public OopClosure {
  protected:
    ScanWeakRefClosure* _cl;
    CardTableRS* _rs;
    template <class T> void do_oop_work(T* p);
  public:
    KeepAliveClosure(ScanWeakRefClosure* cl);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  class FastKeepAliveClosure: public KeepAliveClosure {
  protected:
    HeapWord* _boundary;
    template <class T> void do_oop_work(T* p);
  public:
    FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  class FastEvacuateFollowersClosure: public VoidClosure {
    SerialHeap* _heap;
    DefNewScanClosure* _scan_cur_or_nonheap;
    DefNewYoungerGenClosure* _scan_older;
  public:
    FastEvacuateFollowersClosure(SerialHeap* heap,
                                 DefNewScanClosure* cur,
                                 DefNewYoungerGenClosure* older);
    void do_void();
  };

public:
  DefNewGeneration(ReservedSpace rs,
                   size_t initial_byte_size,
                   size_t min_byte_size,
                   size_t max_byte_size,
                   const char* policy = "Serial young collection pauses");

  virtual void ref_processor_init();

  virtual Generation::Name kind() { return Generation::DefNew; }

  // Accessing spaces
  ContiguousSpace* eden() const { return _eden_space; }
  ContiguousSpace* from() const { return _from_space; }
  ContiguousSpace* to()   const { return _to_space;   }

  virtual CompactibleSpace* first_compaction_space() const;

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  size_t max_capacity() const;
  size_t capacity_before_gc() const;
  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;

  size_t max_eden_size() const     { return _max_eden_size; }
  size_t max_survivor_size() const { return _max_survivor_size; }

  bool supports_inline_contig_alloc() const { return true; }
  HeapWord* volatile* top_addr() const;
  HeapWord** end_addr() const;

  // Thread-local allocation buffers
  bool supports_tlab_allocation() const { return true; }
  size_t tlab_capacity() const;
  size_t tlab_used() const;
  size_t unsafe_max_tlab_alloc() const;

  // Grow the generation by the specified number of bytes.
  // The size in bytes is assumed to be properly aligned.
  // Return true if the expansion was successful.
  bool expand(size_t bytes);

  // DefNewGeneration cannot currently expand except at
  // a GC.
  virtual bool is_maximal_no_gc() const { return true; }

  // Iteration
  void object_iterate(ObjectClosure* blk);

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Allocation support
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    assert(UseTLAB || !is_tlab, "Should not allocate tlab");

    size_t overflow_limit    = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);

    const bool non_zero      = word_size > 0;
    const bool overflows     = word_size >= overflow_limit;
    const bool check_too_big = _pretenure_size_threshold_words > 0;
    const bool not_too_big   = word_size < _pretenure_size_threshold_words;
    const bool size_ok       = is_tlab || !check_too_big || not_too_big;

    bool result = !overflows &&
                  non_zero   &&
                  size_ok;

    return result;
  }
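  // A note on the checks above: overflow_limit is the largest word count
  // whose byte size still fits in a size_t (2^(BitsPerSize_t - LogHeapWordSize)
  // words, e.g. 2^61 on a typical 64-bit build), so anything at or above it
  // is rejected as an overflow. Non-TLAB requests of at least
  // _pretenure_size_threshold_words (set from the PretenureSizeThreshold
  // command-line option, which is given in bytes) are also refused here, so
  // such objects can be allocated directly in the old generation instead.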

  HeapWord* allocate(size_t word_size, bool is_tlab);
  HeapWord* allocate_from_space(size_t word_size);

  HeapWord* par_allocate(size_t word_size, bool is_tlab);

  virtual void gc_epilogue(bool full);

  // Save the tops for eden, from, and to
  virtual void record_spaces_top();

  // Accessing marks
  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get a message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
  template <typename OopClosureType>
  void oop_since_save_marks_iterate(OopClosureType* cl);

  // For a non-youngest collection, the DefNewGeneration can contribute
  // "to-space".
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words);

  // Reset for contribution of "to-space".
  virtual void reset_scratch();

  // GC support
  virtual void compute_new_size();

  // Returns true if the collection is likely to be safely
  // completed. Even if this method returns true, a collection
  // may not be guaranteed to succeed, and the system should be
  // able to safely unwind and recover from that failure, albeit
  // at some additional cost. Override superclass's implementation.
  virtual bool collection_attempt_is_safe();

  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t size,
                       bool   is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);

  oop copy_to_survivor_space(oop old);
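  // Broadly, copy_to_survivor_space() copies "old" into to-space, or promotes
  // it to the old generation once its age has reached the tenuring threshold
  // (returned by tenuring_threshold() below) or when to-space allocation
  // fails; if the promotion itself also fails, handle_promotion_failure()
  // takes over.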
  uint tenuring_threshold() { return _tenuring_threshold; }

  // Performance Counter support
  void update_counters();

  // Printing
  virtual const char* name() const;
  virtual const char* short_name() const { return "DefNew"; }

  void print_on(outputStream* st) const;

  void verify();

  bool promo_failure_scan_is_complete() const {
    return _promo_failure_scan_stack.is_empty();
  }

protected:
  // If clear_space is true, clear the survivor spaces. Eden is
  // cleared if the minimum size of eden is 0. If mangle_space
  // is true, also mangle the space in debug mode.
  void compute_space_boundaries(uintx minimum_eden_size,
                                bool clear_space,
                                bool mangle_space);

  // Return adjusted new size for NewSizeThreadIncrease.
  // If any overflow happens, revert to previous new size.
  size_t adjust_for_thread_increase(size_t new_size_candidate,
                                    size_t new_size_before,
                                    size_t alignment) const;

  // Scavenge support
  void swap_spaces();
};

#endif // SHARE_GC_SERIAL_DEFNEWGENERATION_HPP