Commit f59ca05

Shan Hai authored and Ingo Molnar committed
locking, lib/atomic64: Annotate atomic64_lock::lock as raw

The spinlock protected atomic64 operations must be irq safe as they
are used in hard interrupt context and cannot be preempted on -rt:

	NIP [c068b218] rt_spin_lock_slowlock+0x78/0x3a8
	LR [c068b1e0] rt_spin_lock_slowlock+0x40/0x3a8
	Call Trace:
	[eb459b90] [c068b1e0] rt_spin_lock_slowlock+0x40/0x3a8 (unreliable)
	[eb459c20] [c068bdb0] rt_spin_lock+0x40/0x98
	[eb459c40] [c03d2a14] atomic64_read+0x48/0x84
	[eb459c60] [c001aaf4] perf_event_interrupt+0xec/0x28c
	[eb459d10] [c0010138] performance_monitor_exception+0x7c/0x150
	[eb459d30] [c0014170] ret_from_except_full+0x0/0x4c

So annotate it.

In mainline this change documents the low level nature of the lock -
otherwise there's no functional difference. Lockdep and Sparse
checking will work as usual.

Signed-off-by: Shan Hai <haishan.bai@gmail.com>
Reviewed-by: Yong Zhang <yong.zhang0@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

1 parent 3b8f404 commit f59ca05
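
Background: on PREEMPT_RT a plain spinlock_t is turned into a sleeping rt_mutex (hence rt_spin_lock_slowlock in the trace above), which must never be taken from hard interrupt context, while raw_spinlock_t stays a genuine IRQ-disabling busy-wait lock on both mainline and -rt. A minimal sketch of the pattern the converted code relies on, using hypothetical names (demo_lock, demo_counter):

#include <linux/spinlock.h>

/*
 * Hypothetical sketch, not from this patch: data touched from hard IRQ
 * context must be guarded by a raw spinlock so the lock still spins
 * (and masks interrupts) even on PREEMPT_RT.
 */
static DEFINE_RAW_SPINLOCK(demo_lock);
static long long demo_counter;

static long long demo_read_from_hardirq(void)
{
	unsigned long flags;
	long long val;

	raw_spin_lock_irqsave(&demo_lock, flags);	/* never sleeps, even on -rt */
	val = demo_counter;
	raw_spin_unlock_irqrestore(&demo_lock, flags);
	return val;
}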


lib/atomic64.c (22 additions, 22 deletions)

--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -29,7 +29,7 @@
  * Ensure each lock is in a separate cacheline.
  */
 static union {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	char pad[L1_CACHE_BYTES];
 } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
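
The hunks below all operate on a lock returned by lock_addr(), which this patch does not touch. Paraphrased from lib/atomic64.c, it hashes the address of the atomic64_t into the lock array declared above (NR_LOCKS is 16 in that file), roughly:

static inline spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

Note that its return type, like the local spinlock_t *lock declarations left as context in the hunks below, still reads spinlock_t after this patch; those leftovers were converted to raw_spinlock_t in a later cleanup.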

@@ -48,9 +48,9 @@ long long atomic64_read(const atomic64_t *v)
 	spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_read);
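
Note: atomic64_read() is the exact path caught in hard interrupt context in the trace from the commit message (perf_event_interrupt -> atomic64_read), which is what made the sleeping -rt spinlock trip here.
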
@@ -60,9 +60,9 @@ void atomic64_set(atomic64_t *v, long long i)
 	unsigned long flags;
 	spinlock_t *lock = lock_addr(v);
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	v->counter = i;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_set);

@@ -71,9 +71,9 @@ void atomic64_add(long long a, atomic64_t *v)
 	unsigned long flags;
 	spinlock_t *lock = lock_addr(v);
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	v->counter += a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_add);

@@ -83,9 +83,9 @@ long long atomic64_add_return(long long a, atomic64_t *v)
 	spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter += a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_add_return);

@@ -95,9 +95,9 @@ void atomic64_sub(long long a, atomic64_t *v)
 	unsigned long flags;
 	spinlock_t *lock = lock_addr(v);
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	v->counter -= a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_sub);

@@ -107,9 +107,9 @@ long long atomic64_sub_return(long long a, atomic64_t *v)
 	spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter -= a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_sub_return);

@@ -120,11 +120,11 @@ long long atomic64_dec_if_positive(atomic64_t *v)
 	spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter - 1;
 	if (val >= 0)
 		v->counter = val;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_dec_if_positive);

@@ -135,11 +135,11 @@ long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 	spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
 	if (val == o)
 		v->counter = n;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_cmpxchg);
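
Callers typically wrap atomic64_cmpxchg() in a retry loop. An illustrative, hypothetical example (not from this patch): saturating addition built on the compare-and-swap above, where demo_add_saturating and limit are invented names.

static void demo_add_saturating(atomic64_t *v, long long a, long long limit)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + a;
		if (new > limit)
			new = limit;	/* clamp at the hypothetical limit */
	} while (atomic64_cmpxchg(v, old, new) != old);	/* retry if v changed */
}
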
@@ -150,10 +150,10 @@ long long atomic64_xchg(atomic64_t *v, long long new)
 	spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
 	v->counter = new;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_xchg);

@@ -164,12 +164,12 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 	spinlock_t *lock = lock_addr(v);
 	int ret = 0;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	if (v->counter != u) {
 		v->counter += a;
 		ret = 1;
 	}
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(atomic64_add_unless);
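
atomic64_add_unless() returns 1 when it performed the addition and 0 when the counter already equalled u. Its best-known use is the inc-not-zero idiom for reference counts; the generic header wraps it roughly as:

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
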
@@ -179,7 +179,7 @@ static int init_atomic64_lock(void)
 	int i;
 
 	for (i = 0; i < NR_LOCKS; ++i)
-		spin_lock_init(&atomic64_lock[i].lock);
+		raw_spin_lock_init(&atomic64_lock[i].lock);
 	return 0;
 }
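
For reference, init_atomic64_lock() is registered in the same file as an early initcall, so the hashed locks are initialized before any atomic64 user can run; roughly:

pure_initcall(init_atomic64_lock);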
