
Commit 560cb12

Peter Zijlstra authored and Ingo Molnar committed
locking,arch: Rewrite generic atomic support
Rewrite generic atomic support to require only cmpxchg(), and generate all the
other primitives from that. Furthermore, reduce the endless repetition of all
these primitives to a few CPP macros. This way we get more for fewer lines.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20140508135852.940119622@infradead.org
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: David Howells <dhowells@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-arch@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent d4608dd commit 560cb12

3 files changed: +148 −147 lines
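The core idea of the commit, illustrated outside the kernel: every read-modify-write primitive can be derived from a single compare-and-swap loop. Below is a minimal user-space sketch using C11 atomics; the names OP_RETURN and my_atomic_* are illustrative inventions, not code from the commit.

#include <stdatomic.h>
#include <stdio.h>

#define OP_RETURN(op, c_op)						\
static int my_atomic_##op##_return(int i, _Atomic int *v)		\
{									\
	int c = atomic_load(v);						\
	/* on failure, c is reloaded with the current value; retry */	\
	while (!atomic_compare_exchange_weak(v, &c, c c_op i))		\
		;							\
	return c c_op i;						\
}

OP_RETURN(add, +)
OP_RETURN(sub, -)

int main(void)
{
	_Atomic int v = 10;
	printf("%d\n", my_atomic_add_return(5, &v));	/* prints 15 */
	printf("%d\n", my_atomic_sub_return(3, &v));	/* prints 12 */
	return 0;
}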

include/asm-generic/atomic.h

Lines changed: 96 additions & 96 deletions
@@ -18,23 +18,107 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
+/*
+ * atomic_$op() - $op integer to atomic variable
+ * @i: integer value to $op
+ * @v: pointer to the atomic variable
+ *
+ * Atomically $ops @i to @v. Does not strictly guarantee a memory barrier;
+ * use smp_mb__{before,after}_atomic().
+ */
+
+/*
+ * atomic_$op_return() - $op integer to atomic variable and return the result
+ * @i: integer value to $op
+ * @v: pointer to the atomic variable
+ *
+ * Atomically $ops @i to @v. Does imply a full memory barrier.
+ */
+
 #ifdef CONFIG_SMP
-/* Force people to define core atomics */
-# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \
-     !defined(atomic_clear_mask) || !defined(atomic_set_mask)
-#  error "SMP requires a little arch-specific magic"
-# endif
+
+/* we can build all atomic primitives from cmpxchg */
+
+#define ATOMIC_OP(op, c_op)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	int c, old;							\
+									\
+	c = v->counter;							\
+	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
+		c = old;						\
+}
+
+#define ATOMIC_OP_RETURN(op, c_op)					\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	int c, old;							\
+									\
+	c = v->counter;							\
+	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
+		c = old;						\
+									\
+	return c c_op i;						\
+}
+
+#else
+
+#include <linux/irqflags.h>
+
+#define ATOMIC_OP(op, c_op)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	unsigned long flags;						\
+									\
+	raw_local_irq_save(flags);					\
+	v->counter = v->counter c_op i;					\
+	raw_local_irq_restore(flags);					\
+}
+
+#define ATOMIC_OP_RETURN(op, c_op)					\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	unsigned long flags;						\
+	int ret;							\
+									\
+	raw_local_irq_save(flags);					\
+	ret = (v->counter = v->counter c_op i);				\
+	raw_local_irq_restore(flags);					\
+									\
+	return ret;							\
+}
+
+#endif /* CONFIG_SMP */
+
+#ifndef atomic_add_return
+ATOMIC_OP_RETURN(add, +)
+#endif
+
+#ifndef atomic_sub_return
+ATOMIC_OP_RETURN(sub, -)
+#endif
+
+#ifndef atomic_clear_mask
+ATOMIC_OP(and, &)
+#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
 #endif
 
+#ifndef atomic_set_mask
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+ATOMIC_OP(or, |)
+#define atomic_set_mask(i, v)	atomic_or((i), (v))
+#endif
+
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
 /*
  * Atomic operations that C can't guarantee us. Useful for
  * resource counting etc..
  */
 
 #define ATOMIC_INIT(i)	{ (i) }
 
-#ifdef __KERNEL__
-
 /**
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
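For reference, hand-expanding ATOMIC_OP_RETURN(add, +) from the SMP branch above shows the function the preprocessor generates; the expansion is written out here purely for illustration and is not extra code in the commit.

static inline int atomic_add_return(int i, atomic_t *v)
{
	int c, old;

	c = v->counter;
	/*
	 * cmpxchg() stores c + i only if the counter still holds c;
	 * otherwise it returns the value another CPU wrote, and we
	 * retry the update starting from that value.
	 */
	while ((old = cmpxchg(&v->counter, c, c + i)) != c)
		c = old;

	return c + i;
}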
@@ -56,52 +140,6 @@
 
 #include <linux/irqflags.h>
 
-/**
- * atomic_add_return - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns the result
- */
-#ifndef atomic_add_return
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long flags;
-	int temp;
-
-	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
-	temp = v->counter;
-	temp += i;
-	v->counter = temp;
-	raw_local_irq_restore(flags);
-
-	return temp;
-}
-#endif
-
-/**
- * atomic_sub_return - subtract integer from atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns the result
- */
-#ifndef atomic_sub_return
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long flags;
-	int temp;
-
-	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
-	temp = v->counter;
-	temp -= i;
-	v->counter = temp;
-	raw_local_irq_restore(flags);
-
-	return temp;
-}
-#endif
-
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	return atomic_add_return(i, v) < 0;
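On !CONFIG_SMP, the same ATOMIC_OP_RETURN(add, +) invocation instead picks up the irq-disable variant, reproducing the open-coded function deleted above. Again a hand expansion for illustration:

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int ret;

	/*
	 * On UP there are no other CPUs to race with; disabling local
	 * interrupts is enough to make the read-modify-write atomic.
	 */
	raw_local_irq_save(flags);
	ret = (v->counter = v->counter + i);
	raw_local_irq_restore(flags);

	return ret;
}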
@@ -139,49 +177,11 @@ static inline void atomic_dec(atomic_t *v)
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
-  int c, old;
-  c = atomic_read(v);
-  while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
-    c = old;
-  return c;
-}
-
-/**
- * atomic_clear_mask - Atomically clear bits in atomic variable
- * @mask: Mask of the bits to be cleared
- * @v: pointer of type atomic_t
- *
- * Atomically clears the bits set in @mask from @v
- */
-#ifndef atomic_clear_mask
-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	mask = ~mask;
-	raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
-	v->counter &= mask;
-	raw_local_irq_restore(flags);
+	int c, old;
+	c = atomic_read(v);
+	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
+		c = old;
+	return c;
 }
-#endif
-
-/**
- * atomic_set_mask - Atomically set bits in atomic variable
- * @mask: Mask of the bits to be set
- * @v: pointer of type atomic_t
- *
- * Atomically sets the bits set in @mask in @v
- */
-#ifndef atomic_set_mask
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
-	v->counter |= mask;
-	raw_local_irq_restore(flags);
-}
-#endif
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_GENERIC_ATOMIC_H */
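The mask helpers deleted above survive as thin wrappers over the generated atomic_and()/atomic_or(). A usage sketch, with values chosen purely for illustration:

atomic_t v = ATOMIC_INIT(0x0f);

atomic_set_mask(0xf0, &v);	/* expands to atomic_or(0xf0, &v);   v == 0xff */
atomic_clear_mask(0x0f, &v);	/* expands to atomic_and(~0x0f, &v); v == 0xf0 */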

include/asm-generic/atomic64.h

Lines changed: 16 additions & 4 deletions
@@ -20,10 +20,22 @@ typedef struct {
 
 extern long long atomic64_read(const atomic64_t *v);
 extern void atomic64_set(atomic64_t *v, long long i);
-extern void atomic64_add(long long a, atomic64_t *v);
-extern long long atomic64_add_return(long long a, atomic64_t *v);
-extern void atomic64_sub(long long a, atomic64_t *v);
-extern long long atomic64_sub_return(long long a, atomic64_t *v);
+
+#define ATOMIC64_OP(op)						\
+extern void	 atomic64_##op(long long a, atomic64_t *v);
+
+#define ATOMIC64_OP_RETURN(op)					\
+extern long long atomic64_##op##_return(long long a, atomic64_t *v);
+
+#define ATOMIC64_OPS(op)	ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
+
+ATOMIC64_OPS(add)
+ATOMIC64_OPS(sub)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
 extern long long atomic64_dec_if_positive(atomic64_t *v);
 extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
 extern long long atomic64_xchg(atomic64_t *v, long long new);
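Hand-expanding ATOMIC64_OPS(add) recovers exactly the two declarations the old header spelled out:

extern void      atomic64_add(long long a, atomic64_t *v);
extern long long atomic64_add_return(long long a, atomic64_t *v);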

lib/atomic64.c

Lines changed: 36 additions & 47 deletions
@@ -70,53 +70,42 @@ void atomic64_set(atomic64_t *v, long long i)
 }
 EXPORT_SYMBOL(atomic64_set);
 
-void atomic64_add(long long a, atomic64_t *v)
-{
-	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
-
-	raw_spin_lock_irqsave(lock, flags);
-	v->counter += a;
-	raw_spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_add);
-
-long long atomic64_add_return(long long a, atomic64_t *v)
-{
-	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
-
-	raw_spin_lock_irqsave(lock, flags);
-	val = v->counter += a;
-	raw_spin_unlock_irqrestore(lock, flags);
-	return val;
-}
-EXPORT_SYMBOL(atomic64_add_return);
-
-void atomic64_sub(long long a, atomic64_t *v)
-{
-	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
-
-	raw_spin_lock_irqsave(lock, flags);
-	v->counter -= a;
-	raw_spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_sub);
-
-long long atomic64_sub_return(long long a, atomic64_t *v)
-{
-	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
-
-	raw_spin_lock_irqsave(lock, flags);
-	val = v->counter -= a;
-	raw_spin_unlock_irqrestore(lock, flags);
-	return val;
-}
-EXPORT_SYMBOL(atomic64_sub_return);
+#define ATOMIC64_OP(op, c_op)						\
+void atomic64_##op(long long a, atomic64_t *v)				\
+{									\
+	unsigned long flags;						\
+	raw_spinlock_t *lock = lock_addr(v);				\
+									\
+	raw_spin_lock_irqsave(lock, flags);				\
+	v->counter c_op a;						\
+	raw_spin_unlock_irqrestore(lock, flags);			\
+}									\
+EXPORT_SYMBOL(atomic64_##op);
+
+#define ATOMIC64_OP_RETURN(op, c_op)					\
+long long atomic64_##op##_return(long long a, atomic64_t *v)		\
+{									\
+	unsigned long flags;						\
+	raw_spinlock_t *lock = lock_addr(v);				\
+	long long val;							\
+									\
+	raw_spin_lock_irqsave(lock, flags);				\
+	val = (v->counter c_op a);					\
+	raw_spin_unlock_irqrestore(lock, flags);			\
+	return val;							\
+}									\
+EXPORT_SYMBOL(atomic64_##op##_return);
+
+#define ATOMIC64_OPS(op, c_op)						\
+	ATOMIC64_OP(op, c_op)						\
+	ATOMIC64_OP_RETURN(op, c_op)
+
+ATOMIC64_OPS(add, +=)
+ATOMIC64_OPS(sub, -=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 long long atomic64_dec_if_positive(atomic64_t *v)
 {
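As in the header, each ATOMIC64_OPS() invocation stamps out an op/op_return pair. Hand-expanded for illustration, ATOMIC64_OP(add, +=) is the same hashed-spinlock implementation the file previously open-coded:

void atomic64_add(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);	/* spinlock hashed from the address of v */

	raw_spin_lock_irqsave(lock, flags);
	v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_add);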
