/* SPDX-License-Identifier: GPL-2.0 */
/*
 * R/W semaphores for ia64
 *
 * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
 * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 2005 Christoph Lameter <cl@linux.com>
 *
 * Based on asm-i386/rwsem.h and other architecture implementations.
 *
 * The MSW of the count is the negated number of active writers and
 * waiting lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer adds RWSEM_ACTIVE_WRITE_BIAS, the count becomes
 * 0xffffffff00000001 for the case of an uncontended lock.  Readers
 * increment by 1 and see a positive value when uncontended, negative
 * if there are writers and possibly waiting readers (in which case the
 * reader goes to sleep).  A few example count values are sketched after
 * the bias definitions below.
 */

#ifndef _ASM_IA64_RWSEM_H
#define _ASM_IA64_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#include <asm/intrinsics.h>

#define RWSEM_UNLOCKED_VALUE		__IA64_UL_CONST(0x0000000000000000)
#define RWSEM_ACTIVE_BIAS		(1L)
#define RWSEM_ACTIVE_MASK		(0xffffffffL)
#define RWSEM_WAITING_BIAS		(-0x100000000L)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
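
/*
 * For illustration, a few count values that follow from the bias
 * constants above:
 *
 *   0x0000000000000000	unlocked, no waiters
 *   0x0000000000000001	one active reader, no waiters
 *   0x0000000000000003	three active readers, no waiters
 *   0xffffffff00000001	one active writer, uncontended
 *
 * Any negative count means a writer is active and/or lockers are queued.
 */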

/*
 * lock for reading
 */
static inline int
___down_read (struct rw_semaphore *sem)
{
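	/*
	 * fetchadd8.acq atomically bumps the active count and returns the
	 * value the count had before the increment (acquire semantics).  A
	 * negative previous count means a writer is active or lockers are
	 * waiting, so the caller must take the slow path.
	 */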
	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1);

	return (result < 0);
}

static inline void
__down_read (struct rw_semaphore *sem)
{
	if (___down_read(sem))
		rwsem_down_read_failed(sem);
}

static inline int
__down_read_killable (struct rw_semaphore *sem)
{
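	/*
	 * Like __down_read(), but the sleep in the slow path can be
	 * interrupted by a fatal signal, in which case the lock is not
	 * taken and -EINTR is returned.
	 */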
	if (___down_read(sem))
		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
			return -EINTR;

	return 0;
}

/*
 * lock for writing
 */
static inline long
___down_write (struct rw_semaphore *sem)
{
	long old, new;

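	/*
	 * Atomically add RWSEM_ACTIVE_WRITE_BIAS with a cmpxchg loop
	 * (acquire ordering on success).  The old count is returned: it is
	 * zero only for an uncontended lock, so the callers below fall back
	 * to the slow path whenever the return value is non-zero.
	 */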
	do {
		old = atomic_long_read(&sem->count);
		new = old + RWSEM_ACTIVE_WRITE_BIAS;
	} while (atomic_long_cmpxchg_acquire(&sem->count, old, new) != old);

	return old;
}

static inline void
__down_write (struct rw_semaphore *sem)
{
	if (___down_write(sem))
		rwsem_down_write_failed(sem);
}

static inline int
__down_write_killable (struct rw_semaphore *sem)
{
	if (___down_write(sem)) {
		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
			return -EINTR;
	}

	return 0;
}

/*
 * unlock after reading
 */
static inline void
__up_read (struct rw_semaphore *sem)
{
	long result = ia64_fetchadd8_rel((unsigned long *)&sem->count.counter, -1);

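	/*
	 * result is the count before the decrement (fetchadd8.rel has
	 * release semantics); --result yields the new count.  Wake waiters
	 * when lockers are queued (count negative) and the last active
	 * locker has just gone away.
	 */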
	if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void
__up_write (struct rw_semaphore *sem)
{
	long old, new;

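	/*
	 * Drop the writer by subtracting RWSEM_ACTIVE_WRITE_BIAS; release
	 * ordering on the cmpxchg makes the critical section visible before
	 * the lock is observed as free.
	 */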
	do {
		old = atomic_long_read(&sem->count);
		new = old - RWSEM_ACTIVE_WRITE_BIAS;
	} while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);

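	/*
	 * A negative new count with no active lockers left means tasks are
	 * queued on the wait list; hand the lock over to them.
	 */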
	if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int
__down_read_trylock (struct rw_semaphore *sem)
{
	long tmp;
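	/*
	 * Attempt the increment only while the count is non-negative; a
	 * negative count means an active writer or queued lockers, so the
	 * trylock gives up instead of sleeping.
	 */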
	while ((tmp = atomic_long_read(&sem->count)) >= 0) {
		if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp, tmp+1)) {
			return 1;
		}
	}
	return 0;
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int
__down_write_trylock (struct rw_semaphore *sem)
{
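	/*
	 * A single cmpxchg from unlocked to write-locked; any other value in
	 * the count means contention and the trylock fails.
	 */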
	long tmp = atomic_long_cmpxchg_acquire(&sem->count,
			RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * downgrade write lock to read lock
 */
static inline void
__downgrade_write (struct rw_semaphore *sem)
{
	long old, new;

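	/*
	 * Subtracting RWSEM_WAITING_BIAS turns the writer's
	 * RWSEM_ACTIVE_WRITE_BIAS into a plain active count of one, i.e. the
	 * caller now holds the lock as a reader.  Release ordering publishes
	 * the writer's updates, and queued readers can then be woken.
	 */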
	do {
		old = atomic_long_read(&sem->count);
		new = old - RWSEM_WAITING_BIAS;
	} while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);

	if (old < 0)
		rwsem_downgrade_wake(sem);
}

#endif /* _ASM_IA64_RWSEM_H */