Skip to content

Commit 1bdadf4

Browse files
mrutland-arm authored and Peter Zijlstra committed
locking/atomic: atomic64: support ARCH_ATOMIC
We'd like all architectures to convert to ARCH_ATOMIC, as this will enable functionality, and once all architectures are converted it will be possible to make significant cleanups to the atomic headers.

A number of architectures use asm-generic/atomic64.h, and it's impractical to convert the header and all these architectures in one go.

To make it possible to convert them one-by-one, let's make the asm-generic implementation function as either atomic64_*() or arch_atomic64_*() depending on whether ARCH_ATOMIC is selected. To do this, the generic implementations are prefixed as generic_atomic64_*(), and preprocessor definitions map atomic64_*()/arch_atomic64_*() onto these as appropriate.

Once all users are moved over to ARCH_ATOMIC the ifdeffery in the header can be simplified and/or removed entirely.

For existing users (none of which select ARCH_ATOMIC), there should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-11-mark.rutland@arm.com
1 parent f8b6455 commit 1bdadf4

File tree

2 files changed

+79
-31
lines changed

2 files changed

+79
-31
lines changed

include/asm-generic/atomic64.h

Lines changed: 61 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -15,19 +15,17 @@ typedef struct {
1515

1616
#define ATOMIC64_INIT(i) { (i) }
1717

18-
extern s64 atomic64_read(const atomic64_t *v);
19-
extern void atomic64_set(atomic64_t *v, s64 i);
20-
21-
#define atomic64_set_release(v, i) atomic64_set((v), (i))
18+
extern s64 generic_atomic64_read(const atomic64_t *v);
19+
extern void generic_atomic64_set(atomic64_t *v, s64 i);
2220

2321
#define ATOMIC64_OP(op) \
24-
extern void atomic64_##op(s64 a, atomic64_t *v);
22+
extern void generic_atomic64_##op(s64 a, atomic64_t *v);
2523

2624
#define ATOMIC64_OP_RETURN(op) \
27-
extern s64 atomic64_##op##_return(s64 a, atomic64_t *v);
25+
extern s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v);
2826

2927
#define ATOMIC64_FETCH_OP(op) \
30-
extern s64 atomic64_fetch_##op(s64 a, atomic64_t *v);
28+
extern s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v);
3129

3230
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
3331

@@ -46,11 +44,61 @@ ATOMIC64_OPS(xor)
4644
#undef ATOMIC64_OP_RETURN
4745
#undef ATOMIC64_OP
4846

49-
extern s64 atomic64_dec_if_positive(atomic64_t *v);
50-
#define atomic64_dec_if_positive atomic64_dec_if_positive
51-
extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
52-
extern s64 atomic64_xchg(atomic64_t *v, s64 new);
53-
extern s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
54-
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
47+
extern s64 generic_atomic64_dec_if_positive(atomic64_t *v);
48+
extern s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
49+
extern s64 generic_atomic64_xchg(atomic64_t *v, s64 new);
50+
extern s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
51+
52+
#ifdef CONFIG_ARCH_ATOMIC
53+
54+
#define arch_atomic64_read generic_atomic64_read
55+
#define arch_atomic64_set generic_atomic64_set
56+
#define arch_atomic64_set_release generic_atomic64_set
57+
58+
#define arch_atomic64_add generic_atomic64_add
59+
#define arch_atomic64_add_return generic_atomic64_add_return
60+
#define arch_atomic64_fetch_add generic_atomic64_fetch_add
61+
#define arch_atomic64_sub generic_atomic64_sub
62+
#define arch_atomic64_sub_return generic_atomic64_sub_return
63+
#define arch_atomic64_fetch_sub generic_atomic64_fetch_sub
64+
65+
#define arch_atomic64_and generic_atomic64_and
66+
#define arch_atomic64_fetch_and generic_atomic64_fetch_and
67+
#define arch_atomic64_or generic_atomic64_or
68+
#define arch_atomic64_fetch_or generic_atomic64_fetch_or
69+
#define arch_atomic64_xor generic_atomic64_xor
70+
#define arch_atomic64_fetch_xor generic_atomic64_fetch_xor
71+
72+
#define arch_atomic64_dec_if_positive generic_atomic64_dec_if_positive
73+
#define arch_atomic64_cmpxchg generic_atomic64_cmpxchg
74+
#define arch_atomic64_xchg generic_atomic64_xchg
75+
#define arch_atomic64_fetch_add_unless generic_atomic64_fetch_add_unless
76+
77+
#else /* CONFIG_ARCH_ATOMIC */
78+
79+
#define atomic64_read generic_atomic64_read
80+
#define atomic64_set generic_atomic64_set
81+
#define atomic64_set_release generic_atomic64_set
82+
83+
#define atomic64_add generic_atomic64_add
84+
#define atomic64_add_return generic_atomic64_add_return
85+
#define atomic64_fetch_add generic_atomic64_fetch_add
86+
#define atomic64_sub generic_atomic64_sub
87+
#define atomic64_sub_return generic_atomic64_sub_return
88+
#define atomic64_fetch_sub generic_atomic64_fetch_sub
89+
90+
#define atomic64_and generic_atomic64_and
91+
#define atomic64_fetch_and generic_atomic64_fetch_and
92+
#define atomic64_or generic_atomic64_or
93+
#define atomic64_fetch_or generic_atomic64_fetch_or
94+
#define atomic64_xor generic_atomic64_xor
95+
#define atomic64_fetch_xor generic_atomic64_fetch_xor
96+
97+
#define atomic64_dec_if_positive generic_atomic64_dec_if_positive
98+
#define atomic64_cmpxchg generic_atomic64_cmpxchg
99+
#define atomic64_xchg generic_atomic64_xchg
100+
#define atomic64_fetch_add_unless generic_atomic64_fetch_add_unless
101+
102+
#endif /* CONFIG_ARCH_ATOMIC */
55103

56104
#endif /* _ASM_GENERIC_ATOMIC64_H */

lib/atomic64.c

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
4242
return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
4343
}
4444

45-
s64 atomic64_read(const atomic64_t *v)
45+
s64 generic_atomic64_read(const atomic64_t *v)
4646
{
4747
unsigned long flags;
4848
raw_spinlock_t *lock = lock_addr(v);
@@ -53,9 +53,9 @@ s64 atomic64_read(const atomic64_t *v)
5353
raw_spin_unlock_irqrestore(lock, flags);
5454
return val;
5555
}
56-
EXPORT_SYMBOL(atomic64_read);
56+
EXPORT_SYMBOL(generic_atomic64_read);
5757

58-
void atomic64_set(atomic64_t *v, s64 i)
58+
void generic_atomic64_set(atomic64_t *v, s64 i)
5959
{
6060
unsigned long flags;
6161
raw_spinlock_t *lock = lock_addr(v);
@@ -64,10 +64,10 @@ void atomic64_set(atomic64_t *v, s64 i)
6464
v->counter = i;
6565
raw_spin_unlock_irqrestore(lock, flags);
6666
}
67-
EXPORT_SYMBOL(atomic64_set);
67+
EXPORT_SYMBOL(generic_atomic64_set);
6868

6969
#define ATOMIC64_OP(op, c_op) \
70-
void atomic64_##op(s64 a, atomic64_t *v) \
70+
void generic_atomic64_##op(s64 a, atomic64_t *v) \
7171
{ \
7272
unsigned long flags; \
7373
raw_spinlock_t *lock = lock_addr(v); \
@@ -76,10 +76,10 @@ void atomic64_##op(s64 a, atomic64_t *v) \
7676
v->counter c_op a; \
7777
raw_spin_unlock_irqrestore(lock, flags); \
7878
} \
79-
EXPORT_SYMBOL(atomic64_##op);
79+
EXPORT_SYMBOL(generic_atomic64_##op);
8080

8181
#define ATOMIC64_OP_RETURN(op, c_op) \
82-
s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
82+
s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v) \
8383
{ \
8484
unsigned long flags; \
8585
raw_spinlock_t *lock = lock_addr(v); \
@@ -90,10 +90,10 @@ s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
9090
raw_spin_unlock_irqrestore(lock, flags); \
9191
return val; \
9292
} \
93-
EXPORT_SYMBOL(atomic64_##op##_return);
93+
EXPORT_SYMBOL(generic_atomic64_##op##_return);
9494

9595
#define ATOMIC64_FETCH_OP(op, c_op) \
96-
s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
96+
s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v) \
9797
{ \
9898
unsigned long flags; \
9999
raw_spinlock_t *lock = lock_addr(v); \
@@ -105,7 +105,7 @@ s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
105105
raw_spin_unlock_irqrestore(lock, flags); \
106106
return val; \
107107
} \
108-
EXPORT_SYMBOL(atomic64_fetch_##op);
108+
EXPORT_SYMBOL(generic_atomic64_fetch_##op);
109109

110110
#define ATOMIC64_OPS(op, c_op) \
111111
ATOMIC64_OP(op, c_op) \
@@ -130,7 +130,7 @@ ATOMIC64_OPS(xor, ^=)
130130
#undef ATOMIC64_OP_RETURN
131131
#undef ATOMIC64_OP
132132

133-
s64 atomic64_dec_if_positive(atomic64_t *v)
133+
s64 generic_atomic64_dec_if_positive(atomic64_t *v)
134134
{
135135
unsigned long flags;
136136
raw_spinlock_t *lock = lock_addr(v);
@@ -143,9 +143,9 @@ s64 atomic64_dec_if_positive(atomic64_t *v)
143143
raw_spin_unlock_irqrestore(lock, flags);
144144
return val;
145145
}
146-
EXPORT_SYMBOL(atomic64_dec_if_positive);
146+
EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
147147

148-
s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
148+
s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
149149
{
150150
unsigned long flags;
151151
raw_spinlock_t *lock = lock_addr(v);
@@ -158,9 +158,9 @@ s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
158158
raw_spin_unlock_irqrestore(lock, flags);
159159
return val;
160160
}
161-
EXPORT_SYMBOL(atomic64_cmpxchg);
161+
EXPORT_SYMBOL(generic_atomic64_cmpxchg);
162162

163-
s64 atomic64_xchg(atomic64_t *v, s64 new)
163+
s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
164164
{
165165
unsigned long flags;
166166
raw_spinlock_t *lock = lock_addr(v);
@@ -172,9 +172,9 @@ s64 atomic64_xchg(atomic64_t *v, s64 new)
172172
raw_spin_unlock_irqrestore(lock, flags);
173173
return val;
174174
}
175-
EXPORT_SYMBOL(atomic64_xchg);
175+
EXPORT_SYMBOL(generic_atomic64_xchg);
176176

177-
s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
177+
s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
178178
{
179179
unsigned long flags;
180180
raw_spinlock_t *lock = lock_addr(v);
@@ -188,4 +188,4 @@ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
188188

189189
return val;
190190
}
191-
EXPORT_SYMBOL(atomic64_fetch_add_unless);
191+
EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);

0 commit comments

Comments
 (0)