commit 1efe4ce3ca
This implements kernel-level atomic rollback built on top of gUSA, as an
alternative non-IRQ-based atomicity method. This is generally a faster
method for platforms that are lacking the LL/SC pairs that SH-4A and later
use, and is only supportable on legacy cores.

Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
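For context: in the gUSA convention a critical section is bracketed by a "login" (r0 is pointed at the end of the region and r15, the stack pointer, is loaded with the negated region length) and a "logout" that restores r15. If the kernel interrupts the task while r15 is negative, it restarts execution at r0 + r15, so the whole load/modify/store sequence re-executes from the top; that is what makes it atomic without masking IRQs. Below is a sketch of an atomic add in this style, reconstructed from the atomic-grb.h backend this commit introduces; the -6 region size and the exact register constraints are illustrative, from memory rather than quoted from the diff, and atomic_t is the type defined in the header further down.

static inline int atomic_add_return(int i, atomic_t *v)
{
	int tmp;

	__asm__ __volatile__ (
		"   .align 2            \n\t"
		"   mova   1f,  r0      \n\t" /* r0 = end of critical region  */
		"   mov    r15, r1      \n\t" /* save the real stack pointer  */
		"   mov    #-6, r15     \n\t" /* LOGIN: r15 = -(region size)  */
		"   mov.l  @%1, %0      \n\t" /* load the old counter value   */
		"   add    %2,  %0      \n\t" /* add the increment            */
		"   mov.l  %0,  @%1     \n\t" /* store the new value back     */
		"1: mov    r1,  r15     \n\t" /* LOGOUT: restore r15          */
		: "=&r" (tmp), "+r" (v)
		: "r" (i)
		: "memory", "r0", "r1");

	return tmp;
}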
The header as committed (89 lines, 2 KiB, C):
#ifndef __ASM_SH_ATOMIC_H
#define __ASM_SH_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))

#include <linux/compiler.h>
#include <asm/system.h>

#if defined(CONFIG_GUSA_RB)
#include <asm/atomic-grb.h>
#elif defined(CONFIG_CPU_SH4A)
#include <asm/atomic-llsc.h>
#else
#include <asm/atomic-irq.h>
#endif
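
/*
 * Backend selection note: CONFIG_GUSA_RB (added by this commit) pulls in
 * the gUSA rollback versions of the atomic_add()/atomic_sub() family from
 * atomic-grb.h; SH-4A and later use their movli.l/movco.l LL/SC pair via
 * atomic-llsc.h; everything else falls back to the IRQ-disabling variants
 * in atomic-irq.h.
 */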
| 
 | |
| #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 | |
| 
 | |
| #define atomic_dec_return(v) atomic_sub_return(1,(v))
 | |
| #define atomic_inc_return(v) atomic_add_return(1,(v))
 | |
| 
 | |
| /*
 | |
|  * atomic_inc_and_test - increment and test
 | |
|  * @v: pointer of type atomic_t
 | |
|  *
 | |
|  * Atomically increments @v by 1
 | |
|  * and returns true if the result is zero, or false for all
 | |
|  * other cases.
 | |
|  */
 | |
| #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
 | |
| 
 | |
| #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
 | |
| #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
 | |
| 
 | |
| #define atomic_inc(v) atomic_add(1,(v))
 | |
| #define atomic_dec(v) atomic_sub(1,(v))
 | |
| 
 | |
| #ifndef CONFIG_GUSA_RB
 | |
| static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 | |
| {
 | |
| 	int ret;
 | |
| 	unsigned long flags;
 | |
| 
 | |
| 	local_irq_save(flags);
 | |
| 	ret = v->counter;
 | |
| 	if (likely(ret == old))
 | |
| 		v->counter = new;
 | |
| 	local_irq_restore(flags);
 | |
| 
 | |
| 	return ret;
 | |
| }
 | |
| 
 | |
| static inline int atomic_add_unless(atomic_t *v, int a, int u)
 | |
| {
 | |
| 	int ret;
 | |
| 	unsigned long flags;
 | |
| 
 | |
| 	local_irq_save(flags);
 | |
| 	ret = v->counter;
 | |
| 	if (ret != u)
 | |
| 		v->counter += a;
 | |
| 	local_irq_restore(flags);
 | |
| 
 | |
| 	return ret != u;
 | |
| }
 | |
| #endif
 | |
| 
 | |
| #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 | |
| #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 | |
| 
 | |
| /* Atomic operations are already serializing on SH */
 | |
| #define smp_mb__before_atomic_dec()	barrier()
 | |
| #define smp_mb__after_atomic_dec()	barrier()
 | |
| #define smp_mb__before_atomic_inc()	barrier()
 | |
| #define smp_mb__after_atomic_inc()	barrier()
 | |
| 
 | |
| #include <asm-generic/atomic.h>
 | |
| #endif /* __ASM_SH_ATOMIC_H */
 |
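
For a sense of how this API is consumed, here is a hypothetical kernel-style refcounting pattern built only on the primitives above. The struct and function names (my_obj, my_obj_get/my_obj_put) are invented for illustration; kfree() is the standard kernel allocator free.

#include <linux/slab.h>		/* kfree() */

/* Hypothetical refcounted object; names invented for illustration. */
struct my_obj {
	atomic_t refcount;
	/* ... payload ... */
};

static void my_obj_get(struct my_obj *p)
{
	/* Use atomic_inc_not_zero() instead if p may already be dying. */
	atomic_inc(&p->refcount);
}

static void my_obj_put(struct my_obj *p)
{
	/*
	 * atomic_dec_and_test() returns true for exactly one caller (the
	 * one that drops the count to zero), so exactly one path frees p,
	 * whichever backend (gUSA rollback, LL/SC, or IRQ masking) is in use.
	 */
	if (atomic_dec_and_test(&p->refcount))
		kfree(p);
}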