GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp
/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_PARALLEL_PSPARALLELCOMPACT_INLINE_HPP
#define SHARE_GC_PARALLEL_PSPARALLELCOMPACT_INLINE_HPP

#include "gc/parallel/psParallelCompact.hpp"

#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.hpp"
#include "oops/oop.inline.hpp"

inline bool PSParallelCompact::is_marked(oop obj) {
  return mark_bitmap()->is_marked(obj);
}

// Evaluates the normal distribution at the given density; used by the
// dead wood limiter (_dwl_*) calculations when selecting the dense prefix.
inline double PSParallelCompact::normal_distribution(double density) {
  assert(_dwl_initialized, "uninitialized");
  const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
  return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
}

inline bool PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
                                                           idx_t bit) {
  assert(bit > 0, "cannot call this for the first bit/region");
  assert(_summary_data.region_to_addr(region) == _mark_bitmap.bit_to_addr(bit),
         "sanity check");

  // Dead space crosses the boundary if (1) a partial object does not extend
  // onto the region, (2) an object does not start at the beginning of the
  // region, and (3) an object does not end at the end of the prior region.
  return region->partial_obj_size() == 0 &&
         !_mark_bitmap.is_obj_beg(bit) &&
         !_mark_bitmap.is_obj_end(bit - 1);
}

inline bool PSParallelCompact::is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr) {
  return p >= beg_addr && p < end_addr;
}

inline bool PSParallelCompact::is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr) {
  return is_in((HeapWord*)p, beg_addr, end_addr);
}

inline MutableSpace* PSParallelCompact::space(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].space();
}

inline HeapWord* PSParallelCompact::new_top(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].new_top();
}

inline HeapWord* PSParallelCompact::dense_prefix(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].dense_prefix();
}

inline ObjectStartArray* PSParallelCompact::start_array(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].start_array();
}

#ifdef ASSERT
inline void PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr) {
  assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
         "must move left or to a different space");
  assert(is_object_aligned(old_addr) && is_object_aligned(new_addr),
         "checking alignment");
}
#endif // ASSERT

// Marks the object in the bitmap and records its size in the summary data.
// Returns true if this call marked the object, false if it was already marked.
inline bool PSParallelCompact::mark_obj(oop obj) {
  const int obj_size = obj->size();
  if (mark_bitmap()->mark_obj(obj, obj_size)) {
    _summary_data.add_obj(obj, obj_size);
    return true;
  } else {
    return false;
  }
}

// Rewrites the reference at p to the referent's new (post-compaction) location,
// leaving it untouched if the object does not move.
template <class T>
inline void PSParallelCompact::adjust_pointer(T* p, ParCompactionManager* cm) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");

    oop new_obj = cast_to_oop(summary_data().calc_new_pointer(obj, cm));
    assert(new_obj != NULL, "non-null address for live objects");
    // Is it actually relocated at all?
    if (new_obj != obj) {
      assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj),
             "should be in object space");
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
    }
  }
}

// Oop iteration closure that applies PSParallelCompact::adjust_pointer to
// every reference field it visits.
class PCAdjustPointerClosure: public BasicOopIterateClosure {
public:
  PCAdjustPointerClosure(ParCompactionManager* cm) {
    verify_cm(cm);
    _cm = cm;
  }
  template <typename T> void do_oop_nv(T* p) { PSParallelCompact::adjust_pointer(p, _cm); }
  virtual void do_oop(oop* p)                { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)          { do_oop_nv(p); }

  virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
private:
  ParCompactionManager* _cm;

  static void verify_cm(ParCompactionManager* cm) NOT_DEBUG_RETURN;
};
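
// Usage sketch (illustrative, not part of the upstream header): a compaction
// worker typically constructs this closure with its ParCompactionManager and
// hands it to oop_iterate(), so that every reference field of an object is
// rewritten to its post-compaction address. The worker_id and obj names below
// are assumed local variables, not something defined in this file.
//
//   ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
//   PCAdjustPointerClosure adjust_closure(cm);
//   obj->oop_iterate(&adjust_closure);  // adjust_pointer runs on each oop*/narrowOop* field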

#endif // SHARE_GC_PARALLEL_PSPARALLELCOMPACT_INLINE_HPP