Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/test/hotspot/gtest/runtime/test_os_linux.cpp
41144 views
1
/*
 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
23
24
#include "precompiled.hpp"
25
26
#ifdef LINUX
27
28
#include <sys/mman.h>
29
30
#include "runtime/globals.hpp"
31
#include "runtime/os.hpp"
32
#include "utilities/align.hpp"
33
#include "concurrentTestRunner.inline.hpp"
34
#include "unittest.hpp"
35
36
namespace {
37
static void small_page_write(void* addr, size_t size) {
38
size_t page_size = os::vm_page_size();
39
40
char* end = (char*)addr + size;
41
for (char* p = (char*)addr; p < end; p += page_size) {
42
*p = 1;
43
}
44
}
45
46
class HugeTlbfsMemory : private ::os::Linux {
47
char* const _ptr;
48
const size_t _size;
49
public:
50
static char* reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, size_t page_size, char* req_addr, bool exec) {
51
return os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, page_size, req_addr, exec);
52
}
53
HugeTlbfsMemory(char* const ptr, size_t size) : _ptr(ptr), _size(size) { }
54
~HugeTlbfsMemory() {
55
if (_ptr != NULL) {
56
os::Linux::release_memory_special_huge_tlbfs(_ptr, _size);
57
}
58
}
59
};
60
61
class ShmMemory : private ::os::Linux {
62
char* const _ptr;
63
const size_t _size;
64
public:
65
static char* reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec) {
66
return os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec);
67
}
68
ShmMemory(char* const ptr, size_t size) : _ptr(ptr), _size(size) { }
69
~ShmMemory() {
70
os::Linux::release_memory_special_shm(_ptr, _size);
71
}
72
};
73
74
// have to use these functions, as gtest's _PRED macros don't like is_aligned
75
// nor (is_aligned<size_t, size_t>)
76
static bool is_size_aligned(size_t size, size_t alignment) {
77
return is_aligned(size, alignment);
78
}
79
static bool is_ptr_aligned(char* ptr, size_t alignment) {
80
return is_aligned(ptr, alignment);
81
}
82
83
static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
84
ASSERT_TRUE(UseSHM) << "must be used only when UseSHM is true";
85
char* addr = ShmMemory::reserve_memory_special_shm(size, alignment, NULL, false);
86
if (addr != NULL) {
87
ShmMemory mr(addr, size);
88
EXPECT_PRED2(is_ptr_aligned, addr, alignment);
89
EXPECT_PRED2(is_ptr_aligned, addr, os::large_page_size());
90
91
small_page_write(addr, size);
92
}
93
}
94
}
95
96
TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_size_aligned) {
97
if (!UseHugeTLBFS) {
98
return;
99
}
100
size_t lp = os::large_page_size();
101
102
for (size_t size = lp; size <= lp * 10; size += lp) {
103
char* addr = HugeTlbfsMemory::reserve_memory_special_huge_tlbfs(size, lp, lp, NULL, false);
104
105
if (addr != NULL) {
106
HugeTlbfsMemory mr(addr, size);
107
small_page_write(addr, size);
108
}
109
}
110
}
111
112
TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_size_not_aligned_without_addr) {
113
if (!UseHugeTLBFS) {
114
return;
115
}
116
size_t lp = os::large_page_size();
117
size_t ag = os::vm_allocation_granularity();
118
119
// sizes to test
120
const size_t sizes[] = {
121
lp, lp + ag, lp + lp / 2, lp * 2,
122
lp * 2 + ag, lp * 2 - ag, lp * 2 + lp / 2,
123
lp * 10, lp * 10 + lp / 2
124
};
125
const int num_sizes = sizeof(sizes) / sizeof(size_t);
126
for (int i = 0; i < num_sizes; i++) {
127
const size_t size = sizes[i];
128
for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
129
char* p = HugeTlbfsMemory::reserve_memory_special_huge_tlbfs(size, alignment, lp, NULL, false);
130
if (p != NULL) {
131
HugeTlbfsMemory mr(p, size);
132
EXPECT_PRED2(is_ptr_aligned, p, alignment) << " size = " << size;
133
small_page_write(p, size);
134
}
135
}
136
}
137
}
138
139
TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_size_not_aligned_with_good_req_addr) {
140
if (!UseHugeTLBFS) {
141
return;
142
}
143
size_t lp = os::large_page_size();
144
size_t ag = os::vm_allocation_granularity();
145
146
// sizes to test
147
const size_t sizes[] = {
148
lp, lp + ag, lp + lp / 2, lp * 2,
149
lp * 2 + ag, lp * 2 - ag, lp * 2 + lp / 2,
150
lp * 10, lp * 10 + lp / 2
151
};
152
const int num_sizes = sizeof(sizes) / sizeof(size_t);
153
154
// Pre-allocate an area as large as the largest allocation
155
// and aligned to the largest alignment we will be testing.
156
const size_t mapping_size = sizes[num_sizes - 1] * 2;
157
char* const mapping = (char*) ::mmap(NULL, mapping_size,
158
PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
159
-1, 0);
160
ASSERT_TRUE(mapping != MAP_FAILED) << " mmap failed, mapping_size = " << mapping_size;
161
// Unmap the mapping, it will serve as a value for a "good" req_addr
162
::munmap(mapping, mapping_size);
163
164
for (int i = 0; i < num_sizes; i++) {
165
const size_t size = sizes[i];
166
for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
167
// req_addr must be at least large page aligned.
168
char* const req_addr = align_up(mapping, MAX2(alignment, lp));
169
char* p = HugeTlbfsMemory::reserve_memory_special_huge_tlbfs(size, alignment, lp, req_addr, false);
170
if (p != NULL) {
171
HugeTlbfsMemory mr(p, size);
172
ASSERT_EQ(req_addr, p) << " size = " << size << ", alignment = " << alignment;
173
small_page_write(p, size);
174
}
175
}
176
}
177
}
178
179
180
TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_size_not_aligned_with_bad_req_addr) {
181
if (!UseHugeTLBFS) {
182
return;
183
}
184
size_t lp = os::large_page_size();
185
size_t ag = os::vm_allocation_granularity();
186
187
// sizes to test
188
const size_t sizes[] = {
189
lp, lp + ag, lp + lp / 2, lp * 2,
190
lp * 2 + ag, lp * 2 - ag, lp * 2 + lp / 2,
191
lp * 10, lp * 10 + lp / 2
192
};
193
const int num_sizes = sizeof(sizes) / sizeof(size_t);
194
195
// Pre-allocate an area as large as the largest allocation
196
// and aligned to the largest alignment we will be testing.
197
const size_t mapping_size = sizes[num_sizes - 1] * 2;
198
char* const mapping = (char*) ::mmap(NULL, mapping_size,
199
PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
200
-1, 0);
201
ASSERT_TRUE(mapping != MAP_FAILED) << " mmap failed, mapping_size = " << mapping_size;
202
// Leave the mapping intact, it will server as "bad" req_addr
203
204
class MappingHolder {
205
char* const _mapping;
206
size_t _size;
207
public:
208
MappingHolder(char* mapping, size_t size) : _mapping(mapping), _size(size) { }
209
~MappingHolder() {
210
::munmap(_mapping, _size);
211
}
212
} holder(mapping, mapping_size);
213
214
for (int i = 0; i < num_sizes; i++) {
215
const size_t size = sizes[i];
216
for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
217
// req_addr must be at least large page aligned.
218
char* const req_addr = align_up(mapping, MAX2(alignment, lp));
219
char* p = HugeTlbfsMemory::reserve_memory_special_huge_tlbfs(size, alignment, lp, req_addr, false);
220
HugeTlbfsMemory mr(p, size);
221
// as the area around req_addr contains already existing mappings, the API should always
222
// return NULL (as per contract, it cannot return another address)
223
EXPECT_TRUE(p == NULL) << " size = " << size
224
<< ", alignment = " << alignment
225
<< ", req_addr = " << req_addr
226
<< ", p = " << p;
227
}
228
}
229
}
230
231
TEST_VM(os_linux, reserve_memory_special_shm) {
232
if (!UseSHM) {
233
return;
234
}
235
size_t lp = os::large_page_size();
236
size_t ag = os::vm_allocation_granularity();
237
238
for (size_t size = ag; size < lp * 3; size += ag) {
239
for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
240
EXPECT_NO_FATAL_FAILURE(test_reserve_memory_special_shm(size, alignment));
241
}
242
}
243
}
244
245
class TestReserveMemorySpecial : AllStatic {
246
public:
247
static void small_page_write(void* addr, size_t size) {
248
size_t page_size = os::vm_page_size();
249
250
char* end = (char*)addr + size;
251
for (char* p = (char*)addr; p < end; p += page_size) {
252
*p = 1;
253
}
254
}
255
256
static void test_reserve_memory_special_huge_tlbfs_size_aligned(size_t size, size_t alignment, size_t page_size) {
257
if (!UseHugeTLBFS) {
258
return;
259
}
260
261
char* addr = os::Linux::reserve_memory_special_huge_tlbfs(size, alignment, page_size, NULL, false);
262
263
if (addr != NULL) {
264
small_page_write(addr, size);
265
266
os::Linux::release_memory_special_huge_tlbfs(addr, size);
267
}
268
}
269
270
static void test_reserve_memory_special_huge_tlbfs_size_aligned() {
271
if (!UseHugeTLBFS) {
272
return;
273
}
274
275
size_t lp = os::large_page_size();
276
277
for (size_t size = lp; size <= lp * 10; size += lp) {
278
test_reserve_memory_special_huge_tlbfs_size_aligned(size, lp, lp);
279
}
280
}
281
282
static void test_reserve_memory_special_huge_tlbfs_size_not_aligned() {
283
size_t lp = os::large_page_size();
284
size_t ag = os::vm_allocation_granularity();
285
286
// sizes to test
287
const size_t sizes[] = {
288
lp, lp + ag, lp + lp / 2, lp * 2,
289
lp * 2 + ag, lp * 2 - ag, lp * 2 + lp / 2,
290
lp * 10, lp * 10 + lp / 2
291
};
292
const int num_sizes = sizeof(sizes) / sizeof(size_t);
293
294
// For each size/alignment combination, we test three scenarios:
295
// 1) with req_addr == NULL
296
// 2) with a non-null req_addr at which we expect to successfully allocate
297
// 3) with a non-null req_addr which contains a pre-existing mapping, at which we
298
// expect the allocation to either fail or to ignore req_addr
299
300
// Pre-allocate two areas; they shall be as large as the largest allocation
301
// and aligned to the largest alignment we will be testing.
302
const size_t mapping_size = sizes[num_sizes - 1] * 2;
303
char* const mapping1 = (char*) ::mmap(NULL, mapping_size,
304
PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
305
-1, 0);
306
EXPECT_NE(mapping1, MAP_FAILED);
307
308
char* const mapping2 = (char*) ::mmap(NULL, mapping_size,
309
PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
310
-1, 0);
311
EXPECT_NE(mapping2, MAP_FAILED);
312
313
// Unmap the first mapping, but leave the second mapping intact: the first
314
// mapping will serve as a value for a "good" req_addr (case 2). The second
315
// mapping, still intact, as "bad" req_addr (case 3).
316
::munmap(mapping1, mapping_size);
317
318
// Case 1
319
for (int i = 0; i < num_sizes; i++) {
320
const size_t size = sizes[i];
321
for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
322
char* p = os::Linux::reserve_memory_special_huge_tlbfs(size, alignment, lp, NULL, false);
323
if (p != NULL) {
324
EXPECT_TRUE(is_aligned(p, alignment));
325
small_page_write(p, size);
326
os::Linux::release_memory_special_huge_tlbfs(p, size);
327
}
328
}
329
}
330
331
// Case 2
332
for (int i = 0; i < num_sizes; i++) {
333
const size_t size = sizes[i];
334
for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
335
// req_addr must be at least large page aligned.
336
char* const req_addr = align_up(mapping1, MAX2(alignment, lp));
337
char* p = os::Linux::reserve_memory_special_huge_tlbfs(size, alignment, lp, req_addr, false);
338
if (p != NULL) {
339
EXPECT_EQ(p, req_addr);
340
small_page_write(p, size);
341
os::Linux::release_memory_special_huge_tlbfs(p, size);
342
}
343
}
344
}
345
346
// Case 3
347
for (int i = 0; i < num_sizes; i++) {
348
const size_t size = sizes[i];
349
for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
350
// req_addr must be at least large page aligned.
351
char* const req_addr = align_up(mapping2, MAX2(alignment, lp));
352
char* p = os::Linux::reserve_memory_special_huge_tlbfs(size, alignment, lp, req_addr, false);
353
// as the area around req_addr contains already existing mappings, the API should always
354
// return NULL (as per contract, it cannot return another address)
355
EXPECT_TRUE(p == NULL);
356
}
357
}
358
359
::munmap(mapping2, mapping_size);
360
361
}
362
363
static void test_reserve_memory_special_huge_tlbfs() {
364
if (!UseHugeTLBFS) {
365
return;
366
}
367
368
test_reserve_memory_special_huge_tlbfs_size_aligned();
369
test_reserve_memory_special_huge_tlbfs_size_not_aligned();
370
}
371
372
static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
373
if (!UseSHM) {
374
return;
375
}
376
377
char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);
378
379
if (addr != NULL) {
380
EXPECT_TRUE(is_aligned(addr, alignment));
381
EXPECT_TRUE(is_aligned(addr, os::large_page_size()));
382
383
small_page_write(addr, size);
384
385
os::Linux::release_memory_special_shm(addr, size);
386
}
387
}
388
389
static void test_reserve_memory_special_shm() {
390
size_t lp = os::large_page_size();
391
size_t ag = os::vm_allocation_granularity();
392
393
for (size_t size = ag; size < lp * 3; size += ag) {
394
for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
395
test_reserve_memory_special_shm(size, alignment);
396
}
397
}
398
}
399
400
static void test() {
401
test_reserve_memory_special_huge_tlbfs();
402
test_reserve_memory_special_shm();
403
}
404
};
405
406
TEST_VM(os_linux, reserve_memory_special) {
407
TestReserveMemorySpecial::test();
408
}
409
410
class ReserveMemorySpecialRunnable : public TestRunnable {
411
public:
412
void runUnitTest() const {
413
TestReserveMemorySpecial::test();
414
}
415
};
416
417
TEST_VM(os_linux, reserve_memory_special_concurrent) {
418
if (UseLargePages) {
419
ReserveMemorySpecialRunnable runnable;
420
ConcurrentTestRunner testRunner(&runnable, 5, 3000);
421
testRunner.run();
422
}
423
}
424
425
#endif
426
427