GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp
/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
#define OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP

#include "atomic_aarch64.hpp"
#include "runtime/vm_version.hpp"

// Implementation of class atomic

// Note that memory_order_conservative requires a full barrier after atomic stores.
// See https://patchwork.kernel.org/patch/3575821/
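// (Illustrative note: this means the conservatively-ordered operations cannot
// rely on a release store alone; the store must be followed by a full barrier,
// on AArch64 roughly "stlr" then "dmb ish". The RELEASE_X_FENCE specialization
// at the end of this file spells out the same pattern as release_store()
// followed by OrderAccess::fence().)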

// Call one of the stubs from C++. This uses the C calling convention,
// but this asm definition is used in order only to clobber the
// registers we use. If we called the stubs via an ABI call we'd have
// to save X0 - X18 and most of the vectors.
//
// This really ought to be a template definition, but see GCC Bug
// 33661, template methods forget explicit local register asm
// vars. The problem is that register specifiers attached to local
// variables are ignored in any template function.
inline uint64_t bare_atomic_fastcall(address stub, volatile void *ptr, uint64_t arg1, uint64_t arg2 = 0) {
  register uint64_t reg0 __asm__("x0") = (uint64_t)ptr;
  register uint64_t reg1 __asm__("x1") = arg1;
  register uint64_t reg2 __asm__("x2") = arg2;
  register uint64_t reg3 __asm__("x3") = (uint64_t)stub;
  register uint64_t result __asm__("x0");
  asm volatile(// "stp x29, x30, [sp, #-16]!;"
               " blr %1;"
               // " ldp x29, x30, [sp], #16 // regs %0, %1, %2, %3, %4"
               : "=r"(result), "+r"(reg3), "+r"(reg2)
               : "r"(reg1), "0"(reg0) : "x8", "x9", "x30", "cc", "memory");
  return result;
}
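// In the call above, x0 carries the destination pointer in and the result back
// out (note the "0"(reg0) tie to the "=r"(result) output), x1 and x2 carry the
// arguments, and x3 holds the stub address. Only x8, x9, x30 (the link
// register written by blr), the condition flags and memory are declared
// clobbered, which is the whole point of bypassing the normal ABI call.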

template <typename F, typename D, typename T1>
inline D atomic_fastcall(F stub, volatile D *dest, T1 arg1) {
  return (D)bare_atomic_fastcall(CAST_FROM_FN_PTR(address, stub),
                                 dest, (uint64_t)arg1);
}

template <typename F, typename D, typename T1, typename T2>
inline D atomic_fastcall(F stub, volatile D *dest, T1 arg1, T2 arg2) {
  return (D)bare_atomic_fastcall(CAST_FROM_FN_PTR(address, stub),
                                 dest, (uint64_t)arg1, (uint64_t)arg2);
}
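// These wrappers only erase types: for example, the 4-byte exchange below
// reduces to
//   bare_atomic_fastcall(CAST_FROM_FN_PTR(address, aarch64_atomic_xchg_4_impl),
//                        dest, (uint64_t)exchange_value);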

template<size_t byte_size>
struct Atomic::PlatformAdd {
  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const;

  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
    D value = fetch_and_add(dest, add_value, order) + add_value;
    return value;
  }
};
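// fetch_and_add() returns the old value, so add_and_fetch() simply adds
// add_value once more: if *dest was 5 and add_value is 3, fetch_and_add()
// yields 5 while add_and_fetch() yields 8.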

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  D old_value
    = atomic_fastcall(aarch64_atomic_fetch_add_4_impl, dest, add_value);
  return old_value;
}

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));
  D old_value
    = atomic_fastcall(aarch64_atomic_fetch_add_8_impl, dest, add_value);
  return old_value;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  T old_value = atomic_fastcall(aarch64_atomic_xchg_4_impl, dest, exchange_value);
  return old_value;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  T old_value = atomic_fastcall(aarch64_atomic_xchg_8_impl, dest, exchange_value);
  return old_value;
}
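// The compare-and-exchange specializations below pick a stub according to the
// requested ordering: memory_order_relaxed selects the *_relaxed_impl stub,
// and any other order falls back to the default stub.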

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(1 == sizeof(T));
  aarch64_atomic_stub_t stub;
  switch (order) {
  case memory_order_relaxed:
    stub = aarch64_atomic_cmpxchg_1_relaxed_impl; break;
  default:
    stub = aarch64_atomic_cmpxchg_1_impl; break;
  }

  return atomic_fastcall(stub, dest, compare_value, exchange_value);
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  aarch64_atomic_stub_t stub;
  switch (order) {
  case memory_order_relaxed:
    stub = aarch64_atomic_cmpxchg_4_relaxed_impl; break;
  default:
    stub = aarch64_atomic_cmpxchg_4_impl; break;
  }

  return atomic_fastcall(stub, dest, compare_value, exchange_value);
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  aarch64_atomic_stub_t stub;
  switch (order) {
  case memory_order_relaxed:
    stub = aarch64_atomic_cmpxchg_8_relaxed_impl; break;
  default:
    stub = aarch64_atomic_cmpxchg_8_impl; break;
  }

  return atomic_fastcall(stub, dest, compare_value, exchange_value);
}
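// Ordered loads and stores do not go through the stubs at all: acquire loads
// and release stores map directly onto the GCC __atomic builtins, and the
// RELEASE_X_FENCE variant follows the release store with a full barrier via
// OrderAccess::fence().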
template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
  template <typename T>
  T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
};

template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
{
  template <typename T>
  void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
};

template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }
};

#endif // OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP