Path: src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp
/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
#define OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP

#include "atomic_aarch64.hpp"
#include "runtime/vm_version.hpp"

// Implementation of class atomic

// Note that memory_order_conservative requires a full barrier after atomic stores.
// See https://patchwork.kernel.org/patch/3575821/

// Call one of the stubs from C++. This uses the C calling convention,
// but this asm definition is used only in order to clobber the
// registers we use. If we called the stubs via an ABI call we'd have
// to save X0 - X18 and most of the vectors.
//
// This really ought to be a template definition, but see GCC Bug
// 33661, template methods forget explicit local register asm
// vars. The problem is that register specifiers attached to local
// variables are ignored in any template function.
inline uint64_t bare_atomic_fastcall(address stub, volatile void *ptr, uint64_t arg1, uint64_t arg2 = 0) {
  register uint64_t reg0 __asm__("x0") = (uint64_t)ptr;
  register uint64_t reg1 __asm__("x1") = arg1;
  register uint64_t reg2 __asm__("x2") = arg2;
  register uint64_t reg3 __asm__("x3") = (uint64_t)stub;
  register uint64_t result __asm__("x0");
  asm volatile(// "stp x29, x30, [sp, #-16]!;"
               " blr %1;"
               // " ldp x29, x30, [sp], #16 // regs %0, %1, %2, %3, %4"
               : "=r"(result), "+r"(reg3), "+r"(reg2)
               : "r"(reg1), "0"(reg0) : "x8", "x9", "x30", "cc", "memory");
  return result;
}

template <typename F, typename D, typename T1>
inline D atomic_fastcall(F stub, volatile D *dest, T1 arg1) {
  return (D)bare_atomic_fastcall(CAST_FROM_FN_PTR(address, stub),
                                 dest, (uint64_t)arg1);
}

template <typename F, typename D, typename T1, typename T2>
inline D atomic_fastcall(F stub, volatile D *dest, T1 arg1, T2 arg2) {
  return (D)bare_atomic_fastcall(CAST_FROM_FN_PTR(address, stub),
                                 dest, (uint64_t)arg1, (uint64_t)arg2);
}
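// A minimal usage sketch of the wrappers above ("counter" and the literal
// addend are illustrative only; aarch64_atomic_fetch_add_4_impl is one of
// the stubs declared in atomic_aarch64.hpp):
//
//   static volatile uint32_t counter = 0;
//   uint32_t old = atomic_fastcall(aarch64_atomic_fetch_add_4_impl,
//                                  &counter, 1u);
//
// The wrapper casts the stub to an address, and bare_atomic_fastcall then
// pins the destination into x0, the argument into x1, and branches through
// x3 with a bare "blr", so only x8, x9, x30, the flags, and memory are
// treated as clobbered rather than the full C ABI caller-saved set.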
template<size_t byte_size>
struct Atomic::PlatformAdd {
  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const;

  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
    D value = fetch_and_add(dest, add_value, order) + add_value;
    return value;
  }
};

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  D old_value
    = atomic_fastcall(aarch64_atomic_fetch_add_4_impl, dest, add_value);
  return old_value;
}

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));
  D old_value
    = atomic_fastcall(aarch64_atomic_fetch_add_8_impl, dest, add_value);
  return old_value;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  T old_value = atomic_fastcall(aarch64_atomic_xchg_4_impl, dest, exchange_value);
  return old_value;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  T old_value = atomic_fastcall(aarch64_atomic_xchg_8_impl, dest, exchange_value);
  return old_value;
}
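// The three PlatformCmpxchg specializations below all pick their stub the
// same way: memory_order_relaxed selects the *_relaxed_impl stub, and any
// other order (including memory_order_conservative; see the barrier note at
// the top of this file) falls back to the full-barrier stub. A minimal
// sketch of the resulting semantics, with illustrative values only (HotSpot
// code normally reaches these functors through Atomic::cmpxchg rather than
// calling them directly):
//
//   volatile uint32_t cell = 5;
//   // Compares against 5, stores 7, returns the previous value (5):
//   uint32_t prev = Atomic::PlatformCmpxchg<4>()(&cell, (uint32_t)5,
//                                                (uint32_t)7,
//                                                memory_order_conservative);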
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(1 == sizeof(T));
  aarch64_atomic_stub_t stub;
  switch (order) {
  case memory_order_relaxed:
    stub = aarch64_atomic_cmpxchg_1_relaxed_impl; break;
  default:
    stub = aarch64_atomic_cmpxchg_1_impl; break;
  }

  return atomic_fastcall(stub, dest, compare_value, exchange_value);
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  aarch64_atomic_stub_t stub;
  switch (order) {
  case memory_order_relaxed:
    stub = aarch64_atomic_cmpxchg_4_relaxed_impl; break;
  default:
    stub = aarch64_atomic_cmpxchg_4_impl; break;
  }

  return atomic_fastcall(stub, dest, compare_value, exchange_value);
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  aarch64_atomic_stub_t stub;
  switch (order) {
  case memory_order_relaxed:
    stub = aarch64_atomic_cmpxchg_8_relaxed_impl; break;
  default:
    stub = aarch64_atomic_cmpxchg_8_impl; break;
  }

  return atomic_fastcall(stub, dest, compare_value, exchange_value);
}

template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
  template <typename T>
  T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
};

template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
{
  template <typename T>
  void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
};

template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }
};

#endif // OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP