locking,arch,sparc: Fold atomic_ops
Many of the atomic op implementations are the same except for one instruction; fold the lot into a few CPP macros and reduce LoC. This also prepares for easy addition of new ops.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: sparclinux@vger.kernel.org
Link: http://lkml.kernel.org/r/20140508135852.825281379@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 4f3316c2b5
parent c6470150df
6 changed files with 131 additions and 146 deletions
@@ -27,18 +27,23 @@ static DEFINE_SPINLOCK(dummy);
 
 #endif /* SMP */
 
-int __atomic_add_return(int i, atomic_t *v)
-{
-	int ret;
-	unsigned long flags;
-	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+#define ATOMIC_OP(op, cop)						\
+int atomic_##op##_return(int i, atomic_t *v)				\
+{									\
+	int ret;							\
+	unsigned long flags;						\
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
+									\
+	ret = (v->counter cop i);					\
+									\
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
+	return ret;							\
+}									\
+EXPORT_SYMBOL(atomic_##op##_return);
 
-	ret = (v->counter += i);
+ATOMIC_OP(add, +=)
 
-	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
-	return ret;
-}
-EXPORT_SYMBOL(__atomic_add_return);
+#undef ATOMIC_OP
 
 int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
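To make the fold concrete, here is a minimal userspace sketch of the same pattern; it is an illustration, not code from this commit. A single pthread mutex stands in for the kernel's ATOMIC_HASH() spinlock array, and the hypothetical ATOMIC_OP(sub, -=) line is included only to show what the changelog means by easy addition of new ops: each new op is one more macro invocation.

/*
 * Illustrative userspace sketch of the ATOMIC_OP() fold (not kernel code).
 * A pthread mutex replaces the kernel's hashed spinlocks; atomic_sub_return()
 * is generated purely to demonstrate adding a new op.
 */
#include <stdio.h>
#include <pthread.h>

typedef struct { int counter; } atomic_t;

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* One macro emits the whole locked read-modify-write function per op. */
#define ATOMIC_OP(op, cop)						\
int atomic_##op##_return(int i, atomic_t *v)				\
{									\
	int ret;							\
									\
	pthread_mutex_lock(&lock);					\
	ret = (v->counter cop i);					\
	pthread_mutex_unlock(&lock);					\
	return ret;							\
}

ATOMIC_OP(add, +=)	/* generates atomic_add_return() */
ATOMIC_OP(sub, -=)	/* a new op is just one more line */

#undef ATOMIC_OP

int main(void)
{
	atomic_t v = { .counter = 0 };

	printf("%d\n", atomic_add_return(5, &v));	/* prints 5 */
	printf("%d\n", atomic_sub_return(2, &v));	/* prints 3 */
	return 0;
}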