Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
41144 views
1
/*
2
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
3
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
4
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5
*
6
* This code is free software; you can redistribute it and/or modify it
7
* under the terms of the GNU General Public License version 2 only, as
8
* published by the Free Software Foundation.
9
*
10
* This code is distributed in the hope that it will be useful, but WITHOUT
11
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13
* version 2 for more details (a copy is included in the LICENSE file that
14
* accompanied this code).
15
*
16
* You should have received a copy of the GNU General Public License version
17
* 2 along with this work; if not, write to the Free Software Foundation,
18
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19
*
20
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21
* or visit www.oracle.com if you need additional information or have any
22
* questions.
23
*
24
*/
25
26
// no precompiled headers
27
#include "jvm.h"
28
#include "asm/macroAssembler.hpp"
29
#include "classfile/vmSymbols.hpp"
30
#include "code/codeCache.hpp"
31
#include "code/icBuffer.hpp"
32
#include "code/vtableStubs.hpp"
33
#include "code/nativeInst.hpp"
34
#include "interpreter/interpreter.hpp"
35
#include "memory/allocation.inline.hpp"
36
#include "os_share_linux.hpp"
37
#include "prims/jniFastGetField.hpp"
38
#include "prims/jvm_misc.hpp"
39
#include "runtime/arguments.hpp"
40
#include "runtime/frame.inline.hpp"
41
#include "runtime/interfaceSupport.inline.hpp"
42
#include "runtime/java.hpp"
43
#include "runtime/javaCalls.hpp"
44
#include "runtime/mutexLocker.hpp"
45
#include "runtime/osThread.hpp"
46
#include "runtime/safepointMechanism.hpp"
47
#include "runtime/sharedRuntime.hpp"
48
#include "runtime/stubRoutines.hpp"
49
#include "runtime/thread.inline.hpp"
50
#include "runtime/timer.hpp"
51
#include "signals_posix.hpp"
52
#include "utilities/debug.hpp"
53
#include "utilities/events.hpp"
54
#include "utilities/vmError.hpp"
55
56
// put OS-includes here
57
# include <sys/types.h>
58
# include <sys/mman.h>
59
# include <pthread.h>
60
# include <signal.h>
61
# include <errno.h>
62
# include <dlfcn.h>
63
# include <stdlib.h>
64
# include <stdio.h>
65
# include <unistd.h>
66
# include <sys/resource.h>
67
# include <pthread.h>
68
# include <sys/stat.h>
69
# include <sys/time.h>
70
# include <sys/utsname.h>
71
# include <sys/socket.h>
72
# include <sys/wait.h>
73
# include <pwd.h>
74
# include <poll.h>
75
# include <ucontext.h>
76
77
#define REG_FP 29
78
#define REG_LR 30
79
80
// Approximate the current stack pointer with the frame address of this
// function; NOINLINE keeps the frame (and thus the address) meaningful.
NOINLINE address os::current_stack_pointer() {
  void* frame_addr = __builtin_frame_address(0);
  return static_cast<address>(frame_addr);
}
83
84
// Return a word that must never look like an address returned by
// reserve_memory, even in its subfields (as defined by the CPU immediate
// fields, if the CPU splits constants across multiple instructions).
char* os::non_memory_address_word() {
  const uintptr_t poison = 0xffffffffffff; // 48 set bits
  return reinterpret_cast<char*>(poison);
}
91
92
// Read the saved program counter out of a signal ucontext.
address os::Posix::ucontext_get_pc(const ucontext_t * uc) {
  return reinterpret_cast<address>(uc->uc_mcontext.pc);
}
95
96
void os::Posix::ucontext_set_pc(ucontext_t * uc, address pc) {
97
uc->uc_mcontext.pc = (intptr_t)pc;
98
}
99
100
intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) {
101
return (intptr_t*)uc->uc_mcontext.sp;
102
}
103
104
intptr_t* os::Linux::ucontext_get_fp(const ucontext_t * uc) {
105
return (intptr_t*)uc->uc_mcontext.regs[REG_FP];
106
}
107
108
// Extract pc, sp and fp from a signal context. Either output pointer may
// be NULL if the caller does not need that value. With a NULL context all
// outputs (and the returned pc) are NULL.
address os::fetch_frame_from_context(const void* ucVoid,
                    intptr_t** ret_sp, intptr_t** ret_fp) {
  const ucontext_t* ctx = (const ucontext_t*)ucVoid;

  if (ctx == NULL) {
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
    return NULL;
  }

  if (ret_sp) *ret_sp = os::Linux::ucontext_get_sp(ctx);
  if (ret_fp) *ret_fp = os::Linux::ucontext_get_fp(ctx);
  return os::Posix::ucontext_get_pc(ctx);
}
126
127
// Convenience overload: build a frame object directly from a signal context.
frame os::fetch_frame_from_context(const void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  address pc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, pc);
}
133
134
// Build a frame for a fault raised by compiled code. At the point where
// compiled code bangs the stack, LR has not yet been saved into the frame:
// LR is live, and SP and FP still belong to the caller.
frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
  const ucontext_t* ctx = (const ucontext_t*)ucVoid;
  // Back LR up by one instruction so the pc falls inside the caller's code.
  address pc = (address)(ctx->uc_mcontext.regs[REG_LR]
                         - NativeInstruction::instruction_size);
  return frame(os::Linux::ucontext_get_sp(ctx),
               os::Linux::ucontext_get_fp(ctx),
               pc);
}
145
146
// By default, gcc always saves frame pointer rfp on this stack. This
147
// may get turned off by -fomit-frame-pointer.
148
frame os::get_sender_for_C_frame(frame* fr) {
149
return frame(fr->link(), fr->link(), fr->sender_pc());
150
}
151
152
// Build a frame describing this function's own activation and, if the stack
// is walkable from here, return its sender; otherwise return an empty frame.
NOINLINE frame os::current_frame() {
  intptr_t* fp = *(intptr_t**)__builtin_frame_address(0);
  frame myframe((intptr_t*)os::current_stack_pointer(),
                fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (!os::is_first_C_frame(&myframe)) {
    return os::get_sender_for_C_frame(&myframe);
  }
  // stack is not walkable
  return frame();
}
164
165
// Linux/AArch64 part of HotSpot's POSIX signal handling. Examines the
// signal, the faulting context and the current thread, and decides whether
// the trap can be handled by redirecting the saved pc to a VM-generated
// stub (or, for stack overflow, handled in place). Returns true when the
// signal has been fully handled here; false lets the shared POSIX layer
// continue with generic processing.
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
                                             ucontext_t* uc, JavaThread* thread) {

/*
  NOTE: does not seem to work on linux.
  if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
    // can't decode this kind of signal
    info = NULL;
  } else {
    assert(sig == info->si_signo, "bad siginfo");
  }
*/
  // decide if this trap can be handled by a stub
  address stub = NULL;

  address pc = NULL;

  //%note os_trap_1
  // Only attempt stub-based handling when we have full information: the
  // siginfo, the interrupted context, and an attached JavaThread.
  if (info != NULL && uc != NULL && thread != NULL) {
    pc = (address) os::Posix::ucontext_get_pc(uc);

    address addr = (address) info->si_addr;

    // Make sure the high order byte is sign extended, as it may be masked away by the hardware.
    if ((uintptr_t(addr) & (uintptr_t(1) << 55)) != 0) {
      addr = address(uintptr_t(addr) | (uintptr_t(0xFF) << 56));
    }

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      // check if fault address is within thread stack
      if (thread->is_in_full_stack(addr)) {
        if (os::Posix::handle_stack_overflow(thread, addr, pc, uc, &stub)) {
          return true; // continue
        }
      }
    }

    if (thread->thread_state() == _thread_in_Java) {
      // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub

      // Handle signal from NativeJump::patch_verified_entry().
      if ((sig == SIGILL || sig == SIGTRAP)
          && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
        if (TraceTraps) {
          tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
        }
        stub = SharedRuntime::get_handle_wrong_method_stub();
      } else if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) {
        // Fault on the safepoint polling page: divert to the poll stub.
        stub = SharedRuntime::get_poll_stub(pc);
      } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
        bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
        if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
          // Resume just past the faulting access, or at the copy stub's
          // designated continuation pc for an unsafe arraycopy.
          address next_pc = pc + NativeCall::instruction_size;
          if (is_unsafe_arraycopy) {
            next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
          }
          stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
        }
      } else if (sig == SIGILL && nativeInstruction_at(pc)->is_stop()) {
        // Pull a pointer to the error message out of the instruction
        // stream.
        const uint64_t *detail_msg_ptr
          = (uint64_t*)(pc + NativeInstruction::instruction_size);
        const char *detail_msg = (const char *)*detail_msg_ptr;
        const char *msg = "stop";
        if (TraceTraps) {
          tty->print_cr("trap: %s: (SIGILL)", msg);
        }

        // End life with a fatal error, message and detail message and the context.
        // Note: no need to do any post-processing here (e.g. signal chaining)
        va_list va_dummy;
        VMError::report_and_die(thread, uc, NULL, 0, msg, detail_msg, va_dummy);
        va_end(va_dummy);

        ShouldNotReachHere();

      }
      else

      if (sig == SIGFPE &&
          (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
        // Integer or floating-point divide trap: continue at the
        // implicit-exception continuation for this pc.
        stub =
          SharedRuntime::
          continuation_for_implicit_exception(thread,
                                              pc,
                                              SharedRuntime::
                                              IMPLICIT_DIVIDE_BY_ZERO);
      } else if (sig == SIGSEGV &&
                 MacroAssembler::uses_implicit_null_check((void*)addr)) {
        // Determination of interpreter/vtable stub/compiled code null exception
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
    } else if ((thread->thread_state() == _thread_in_vm ||
                thread->thread_state() == _thread_in_native) &&
               sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
               thread->doing_unsafe_access()) {
      // Unsafe access faulted while in VM/native state: resume after the
      // access (or at the copy stub's continuation pc).
      address next_pc = pc + NativeCall::instruction_size;
      if (UnsafeCopyMemory::contains_pc(pc)) {
        next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
      }
      stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }
  }

  if (stub != NULL) {
    // save all thread context in case we need to restore it
    if (thread != NULL) thread->set_saved_exception_pc(pc);

    os::Posix::ucontext_set_pc(uc, stub);
    return true;
  }

  return false; // Mute compiler
}
296
297
// No per-thread FPU state initialization is needed on this platform;
// intentionally empty.
void os::Linux::init_thread_fpu_state(void) {
}
299
300
// There is no FPU control word to read on this platform; always returns 0.
int os::Linux::get_fpu_control_word(void) {
  return 0;
}
303
304
// There is no FPU control word to set on this platform; intentionally a
// no-op (fpu_control is ignored).
void os::Linux::set_fpu_control_word(int fpu_control) {
}
306
307
////////////////////////////////////////////////////////////////////////////////
308
// thread stack
309
310
// Minimum usable stack sizes required to get to user code. Space for
// HotSpot guard pages is added later.
// All three thread kinds share the same 72K floor on this platform.
size_t os::Posix::_compiler_thread_min_stack_allowed = 72 * K;
size_t os::Posix::_java_thread_min_stack_allowed = 72 * K;
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 72 * K;
315
316
// return default stack size for thr_type
317
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
318
// default stack size (compiler thread needs larger stack)
319
size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
320
return s;
321
}
322
323
/////////////////////////////////////////////////////////////////////////////
324
// helper functions for fatal error handler
325
326
// Fatal-error helper: dump the CPU state captured in the given context —
// all 31 general purpose registers, the top of the stack, and the
// instructions around the faulting pc. Does nothing for a NULL context.
void os::print_context(outputStream *st, const void *context) {
  if (context == NULL) return;

  const ucontext_t *ctx = (const ucontext_t*)context;

  st->print_cr("Registers:");
  for (int i = 0; i < 31; i++) {
    st->print("R%-2d=", i);
    print_location(st, ctx->uc_mcontext.regs[i]);
  }
  st->cr();

  intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(ctx);
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(sp));
  print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = os::Posix::ucontext_get_pc(ctx);
  print_instructions(st, pc, 4/*native instruction size*/);
  st->cr();
}
349
350
// Fatal-error helper: print the raw value of each general purpose register
// from the given context, one per line. Does nothing for a NULL context.
void os::print_register_info(outputStream *st, const void *context) {
  if (context == NULL) return;

  const ucontext_t *ctx = (const ucontext_t*)context;

  st->print_cr("Register to memory mapping:");
  st->cr();

  // The layout of the registers in the context does not match how we
  // defined our abstract Register set, so we cannot simply iterate through
  // a gregs area; only the "general purpose" registers are listed here.
  for (int i = 0; i < 31; i++) {
    st->print_cr( "R%d=" INTPTR_FORMAT, i, (uintptr_t)ctx->uc_mcontext.regs[i]);
  }
  st->cr();
}
368
369
// No FPU configuration is required on this platform; intentionally empty.
void os::setup_fpu() {
}
371
372
#ifndef PRODUCT
// Debug-build sanity check: the current stack pointer must be aligned to
// StackAlignmentInBytes.
void os::verify_stack_alignment() {
  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
}
#endif
377
378
// Extra bytes to bang beyond the normal stack bang distance.
int os::extra_bang_size_in_bytes() {
  // AArch64 does not require the additional stack bang.
  return 0;
}
382
383
extern "C" {
384
int SpinPause() {
385
return 0;
386
}
387
388
void _Copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
389
if (from > to) {
390
const jshort *end = from + count;
391
while (from < end)
392
*(to++) = *(from++);
393
}
394
else if (from < to) {
395
const jshort *end = from;
396
from += count - 1;
397
to += count - 1;
398
while (from >= end)
399
*(to--) = *(from--);
400
}
401
}
402
void _Copy_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
403
if (from > to) {
404
const jint *end = from + count;
405
while (from < end)
406
*(to++) = *(from++);
407
}
408
else if (from < to) {
409
const jint *end = from;
410
from += count - 1;
411
to += count - 1;
412
while (from >= end)
413
*(to--) = *(from--);
414
}
415
}
416
void _Copy_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
417
if (from > to) {
418
const jlong *end = from + count;
419
while (from < end)
420
os::atomic_copy64(from++, to++);
421
}
422
else if (from < to) {
423
const jlong *end = from;
424
from += count - 1;
425
to += count - 1;
426
while (from >= end)
427
os::atomic_copy64(from--, to--);
428
}
429
}
430
431
void _Copy_arrayof_conjoint_bytes(const HeapWord* from,
432
HeapWord* to,
433
size_t count) {
434
memmove(to, from, count);
435
}
436
void _Copy_arrayof_conjoint_jshorts(const HeapWord* from,
437
HeapWord* to,
438
size_t count) {
439
memmove(to, from, count * 2);
440
}
441
void _Copy_arrayof_conjoint_jints(const HeapWord* from,
442
HeapWord* to,
443
size_t count) {
444
memmove(to, from, count * 4);
445
}
446
void _Copy_arrayof_conjoint_jlongs(const HeapWord* from,
447
HeapWord* to,
448
size_t count) {
449
memmove(to, from, count * 8);
450
}
451
};
452
453