
Commit f05cab0

chleroy authored and mpe committed
powerpc/atomics: Remove atomic_inc()/atomic_dec() and friends
Now that atomic_add() and atomic_sub() handle immediate operands, atomic_inc() and atomic_dec() add no value over the generic fallbacks, which call atomic_add(1) and atomic_sub(1).

Also remove atomic_inc_not_zero(), which falls back to atomic_add_unless(), which itself falls back to atomic_fetch_add_unless(), which now handles immediate operands.

Signed-off-by: Christophe Leroy <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
Link: https://lore.kernel.org/r/0bc64a2f18726055093dbb2e479cefc60a409cfd.1632236981.git.christophe.leroy@csgroup.eu
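
For reference, once these arch-specific versions are gone the kernel falls back to its generic definitions. Below is a simplified sketch of those fallbacks, modelled on include/linux/atomic/atomic-arch-fallback.h; the in-tree wrappers also generate the _acquire/_release/_relaxed and instrumented variants, so treat this as an illustration rather than the exact in-tree code.

/* Sketch of the generic fallbacks that replace the removed powerpc code. */

#ifndef arch_atomic_inc
static __always_inline void
arch_atomic_inc(atomic_t *v)
{
	arch_atomic_add(1, v);	/* powerpc's arch_atomic_add() now folds the constant 1 into an immediate */
}
#define arch_atomic_inc arch_atomic_inc
#endif

#ifndef arch_atomic_dec
static __always_inline void
arch_atomic_dec(atomic_t *v)
{
	arch_atomic_sub(1, v);
}
#define arch_atomic_dec arch_atomic_dec
#endif

#ifndef arch_atomic_inc_not_zero
static __always_inline bool
arch_atomic_inc_not_zero(atomic_t *v)
{
	/* add_unless() itself falls back to fetch_add_unless(v, 1, 0) */
	return arch_atomic_add_unless(v, 1, 0);
}
#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero
#endif

Because the preceding patches let powerpc's arch_atomic_add(), arch_atomic_sub() and arch_atomic_fetch_add_unless() use immediate operands for compile-time constants, these fallbacks should generate essentially the same lwarx/stwcx. loops as the hand-written assembly removed below.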
1 parent 41d6520 commit f05cab0

File tree

1 file changed: +0 −95 lines


arch/powerpc/include/asm/atomic.h

Lines changed: 0 additions & 95 deletions
@@ -118,71 +118,6 @@ ATOMIC_OPS(xor, xor, "", K)
 #undef ATOMIC_OP_RETURN_RELAXED
 #undef ATOMIC_OP
 
-static __inline__ void arch_atomic_inc(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%2		# atomic_inc\n\
-	addic	%0,%0,1\n"
-"	stwcx.	%0,0,%2 \n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (&v->counter)
-	: "cc", "xer");
-}
-#define arch_atomic_inc arch_atomic_inc
-
-static __inline__ int arch_atomic_inc_return_relaxed(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
-"	addic	%0,%0,1\n"
-"	stwcx.	%0,0,%2\n"
-"	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (&v->counter)
-	: "cc", "xer");
-
-	return t;
-}
-
-static __inline__ void arch_atomic_dec(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%2		# atomic_dec\n\
-	addic	%0,%0,-1\n"
-"	stwcx.	%0,0,%2\n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (&v->counter)
-	: "cc", "xer");
-}
-#define arch_atomic_dec arch_atomic_dec
-
-static __inline__ int arch_atomic_dec_return_relaxed(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
-"	addic	%0,%0,-1\n"
-"	stwcx.	%0,0,%2\n"
-"	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (&v->counter)
-	: "cc", "xer");
-
-	return t;
-}
-
-#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
-#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
-
 #define arch_atomic_cmpxchg(v, o, n) \
 	(arch_cmpxchg(&((v)->counter), (o), (n)))
 #define arch_atomic_cmpxchg_relaxed(v, o, n) \
@@ -255,36 +190,6 @@ static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 }
 #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
 
-/**
- * atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
- */
-static __inline__ int arch_atomic_inc_not_zero(atomic_t *v)
-{
-	int t1, t2;
-
-	__asm__ __volatile__ (
-	PPC_ATOMIC_ENTRY_BARRIER
-"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
-	cmpwi	0,%0,0\n\
-	beq-	2f\n\
-	addic	%1,%0,1\n"
-"	stwcx.	%1,0,%2\n\
-	bne-	1b\n"
-	PPC_ATOMIC_EXIT_BARRIER
-	"\n\
-2:"
-	: "=&r" (t1), "=&r" (t2)
-	: "r" (&v->counter)
-	: "cc", "xer", "memory");
-
-	return t1;
-}
-#define arch_atomic_inc_not_zero(v) arch_atomic_inc_not_zero((v))
-
 /*
  * Atomically test *v and decrement if it is greater than 0.
  * The function returns the old value of *v minus 1, even if
