/**
 * Copyright (c) 2024 NextSilicon
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ATOMIC_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_ATOMIC_H_

#ifdef __cplusplus
extern "C" {
#endif

/* The standard RISC-V atomic-instruction extension, "A", defines a set
 * of instructions that atomically read-modify-write memory, which RISC-V
 * harts must support in order to synchronise with other harts running in
 * the same memory space. This file provides the subset of those atomic
 * operations that is not already covered by atomic_builtin.h.
 */

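/* Example usage (an illustrative sketch; "shared" is a hypothetical
 * variable, not part of this API):
 *
 *	atomic_t shared = ATOMIC_INIT(0);
 *	atomic_val_t old = atomic_swap(&shared, 42);
 *	atomic_max(&shared, 100);
 *
 * After this sequence "old" holds 0 and "shared" holds 100. The .aq
 * (acquire) suffix on the AMO instructions below prevents subsequent
 * memory accesses on the same hart from being reordered before the
 * atomic operation.
 */
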
#ifdef CONFIG_64BIT
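/**
 * @brief Atomically swap the value of @a target with @a newval.
 *
 * @param target Address of the atomic variable.
 * @param newval Value to store in @a target.
 *
 * @return The previous value of @a target.
 */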
static ALWAYS_INLINE atomic_val_t atomic_swap(atomic_t *target, atomic_val_t newval)
{
	atomic_val_t ret;

	__asm__ volatile("amoswap.d.aq %0, %1, %2"
			 : "=r"(ret)
			 : "r"(newval), "A"(*target)
			 : "memory");

	return ret;
}

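/**
 * @brief Atomically store the signed maximum of @a target and @a value
 *	  in @a target.
 *
 * @return The previous value of @a target.
 */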
static ALWAYS_INLINE atomic_val_t atomic_max(atomic_t *target, atomic_val_t value)
{
	atomic_val_t ret;

	__asm__ volatile("amomax.d.aq %0, %1, %2"
			 : "=r"(ret)
			 : "r"(value), "A"(*target)
			 : "memory");

	return ret;
}

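/**
 * @brief Atomically store the signed minimum of @a target and @a value
 *	  in @a target.
 *
 * @return The previous value of @a target.
 */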
static ALWAYS_INLINE atomic_val_t atomic_min(atomic_t *target, atomic_val_t value)
{
	atomic_val_t ret;

	__asm__ volatile("amomin.d.aq %0, %1, %2"
			 : "=r"(ret)
			 : "r"(value), "A"(*target)
			 : "memory");

	return ret;
}

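/**
 * @brief Atomically store the unsigned maximum of @a target and @a value
 *	  in @a target.
 *
 * @return The previous value of @a target.
 */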
static ALWAYS_INLINE unsigned long atomic_maxu(unsigned long *target, unsigned long value)
{
	unsigned long ret;

	__asm__ volatile("amomaxu.d.aq %0, %1, %2"
			 : "=r"(ret)
			 : "r"(value), "A"(*target)
			 : "memory");

	return ret;
}

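/**
 * @brief Atomically store the unsigned minimum of @a target and @a value
 *	  in @a target.
 *
 * @return The previous value of @a target.
 */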
static ALWAYS_INLINE unsigned long atomic_minu(unsigned long *target, unsigned long value)
{
	unsigned long ret;

	__asm__ volatile("amominu.d.aq %0, %1, %2"
			 : "=r"(ret)
			 : "r"(value), "A"(*target)
			 : "memory");

	return ret;
}

#else

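/* 32-bit counterparts of the helpers documented above, identical except
 * that they use the .w (word) forms of the AMO instructions.
 */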
static ALWAYS_INLINE atomic_val_t atomic_swap(atomic_t *target, atomic_val_t newval)
{
	atomic_val_t ret;

	__asm__ volatile("amoswap.w.aq %0, %1, %2"
			 : "=r"(ret)
			 : "r"(newval), "A"(*target)
			 : "memory");

	return ret;
}

static ALWAYS_INLINE atomic_val_t atomic_max(atomic_t *target, atomic_val_t value)
{
	atomic_val_t ret;

	__asm__ volatile("amomax.w.aq %0, %1, %2"
			 : "=r"(ret)
			 : "r"(value), "A"(*target)
			 : "memory");

	return ret;
}

static ALWAYS_INLINE atomic_val_t atomic_min(atomic_t *target, atomic_val_t value)
{
	atomic_val_t ret;

	__asm__ volatile("amomin.w.aq %0, %1, %2"
			 : "=r"(ret)
			 : "r"(value), "A"(*target)
			 : "memory");

	return ret;
}

static ALWAYS_INLINE unsigned long atomic_maxu(unsigned long *target, unsigned long value)
{
	unsigned long ret;

	__asm__ volatile("amomaxu.w.aq %0, %1, %2"
			 : "=r"(ret)
			 : "r"(value), "A"(*target)
			 : "memory");

	return ret;
}

static ALWAYS_INLINE unsigned long atomic_minu(unsigned long *target, unsigned long value)
{
	unsigned long ret;

	__asm__ volatile("amominu.w.aq %0, %1, %2"
			 : "=r"(ret)
			 : "r"(value), "A"(*target)
			 : "memory");

	return ret;
}

#endif /* CONFIG_64BIT */

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_ATOMIC_H_ */