/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
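
/*
 * Illustrative sketch (not part of the kernel API; the variables below are
 * hypothetical): how ATOMIC_HASH() spreads lock contention.  Every word in
 * one cacheline hashes to the same slot of __atomic_hash, and cachelines
 * are distributed round-robin over the ATOMIC_HASH_SIZE slots:
 *
 *	atomic_t a, b;
 *	arch_spinlock_t *sa = ATOMIC_HASH(&a);
 *	arch_spinlock_t *sb = ATOMIC_HASH(&b);
 *
 * sa == sb whenever &a and &b share a cacheline (or their cacheline numbers
 * are equal modulo ATOMIC_HASH_SIZE); otherwise updates to the two counters
 * take different locks and do not serialize against each other.
 */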
					
						

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
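
/*
 * Illustrative sketch of the pattern the helpers below follow (this block
 * is explanatory only, not extra API): hash the atomic_t's address to a
 * lock, disable local interrupts, take the lock, do the read-modify-write,
 * then release in reverse order.  On UP builds the two macros reduce to
 * plain local_irq_save()/local_irq_restore().
 *
 *	unsigned long flags;
 *	_atomic_spin_lock_irqsave(v, flags);
 *	v->counter += i;		// any read-modify-write of *v
 *	_atomic_spin_unlock_irqrestore(v, flags);
 */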

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

/* It's possible to reduce all atomic operations to either
 * __atomic_add_return, atomic_set and atomic_read (the latter
 * is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return (*(volatile int *)&(v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
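
/*
 * Illustrative sketch: callers normally reach __atomic_add_unless() through
 * the generic wrappers in <linux/atomic.h>, which (in kernels of this
 * vintage) build on the "returns the old value" contract above, roughly:
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 *	#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
 *
 * so atomic_inc_not_zero() returns non-zero only when the increment was
 * actually performed.
 */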
					
						
#define atomic_add(i,v)	((void)(__atomic_add_return(        (i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int) (i)),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	{ (i) }

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return (*(volatile long *)&(v)->counter);
}

#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u (i.e. the add was performed),
 * and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
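
/*
 * Illustrative sketch (hypothetical caller, not part of this header): a
 * 64-bit pool counter that must never drop below zero.
 *
 *	static atomic64_t items_left = ATOMIC64_INIT(16);
 *
 *	static int take_item(void)
 *	{
 *		// old value minus 1; negative means the pool was already empty
 *		return atomic64_dec_if_positive(&items_left) >= 0;
 *	}
 */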
					
						
#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */