/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#ifndef __ASSEMBLY__

#include <asm/psr.h>
#include <asm/processor.h> /* for cpu_relax */

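/* The lock itself is a single byte: zero means free, and the non-zero
 * value written by ldstub (0xff) means held, so testing that byte is
 * all arch_spin_is_locked() needs to do.
 */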
#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

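/* ldstub atomically fetches the lock byte and sets it to 0xff.  If the
 * old value was non-zero the lock was already held, so we branch to an
 * out-of-line loop (.subsection 2) that spins on plain ldub loads until
 * the byte reads zero, then jumps back to retry the ldstub.  The wait
 * loop thus avoids repeated atomic stores, and the uncontended fast
 * path stays short.
 */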
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub	[%0], %%g2\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2f\n\t"
	" ldub	[%0], %%g2\n\t"
	".subsection	2\n"
	"2:\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2b\n\t"
	" ldub	[%0], %%g2\n\t"
	"b,a	1b\n\t"
	".previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g2", "memory", "cc");
}

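/* A single ldstub attempt: the old value of the lock byte comes back in
 * "result", and zero means the lock was free and is now ours.
 */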
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int result;
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (result)
			     : "r" (lock)
			     : "memory");
	return (result == 0);
}

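/* Release is a plain byte store of zero (%g0); the "memory" clobber
 * keeps the compiler from sinking critical-section accesses below it.
 */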
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}

/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 *
 * Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
 *	| 24-bit counter           | wlock |  arch_rwlock_t
 *	------------------------------------
 *	 31                       8 7     0
 *
 * wlock signifies that the one writer is in, or that somebody is
 * updating the counter. If a writer successfully acquires the wlock
 * but the counter is non-zero, it has to release the lock and wait
 * until both the counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
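/* The ___rw_* routines used below are out-of-line assembler helpers,
 * not defined in this header.  Their calling convention is fixed: the
 * lock word is passed in %g1, the original return address is saved in
 * %g4 (the call itself clobbers %o7), and the ldstub in the delay slot
 * grabs the wlock byte ([%g1 + 3] is the least significant byte on
 * big-endian Sparc).  The helpers then do the actual counter and wlock
 * bookkeeping described above.
 */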
static inline void __arch_read_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

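/* Readers may run in interrupt context (see the NOTE above), so the
 * lock-word update is done with interrupts disabled: an interrupt
 * taking a read lock on this CPU while we held the wlock byte for the
 * counter update would otherwise spin on it forever.  Interrupts are
 * restored as soon as the update is done, so the read lock itself is
 * not held with interrupts off.
 */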
#define arch_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_lock(lock); \
	local_irq_restore(flags); \
} while(0)

static inline void __arch_read_unlock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_exit\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define arch_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

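/* Write lock: ___rw_write_enter grabs the wlock byte and, per the
 * scheme described above, returns only once the reader count has
 * drained to zero.  The final store of ~0 then marks the whole word as
 * write-held, which is what arch_write_can_lock() and
 * arch_write_trylock() test against.
 */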
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_write_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	*(volatile __u32 *)&lp->lock = ~0U;
}

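/* Write unlock: one word store of zero clears both the wlock byte and
 * the ~0 marker set on the lock/trylock paths.
 */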
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
"	st		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

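/* Write trylock: ldstub the wlock byte.  If we got it but readers are
 * still counted in the upper 24 bits, back off by clearing the byte
 * and report failure; otherwise claim the lock by setting the whole
 * word to ~0, exactly as arch_write_lock() does.
 */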
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val;

	__asm__ __volatile__("ldstub [%1 + 3], %0"
			     : "=r" (val)
			     : "r" (&rw->lock)
			     : "memory");

	if (val == 0) {
		val = rw->lock & ~0xff;
		if (val)
			((volatile u8*)&rw->lock)[3] = 0;
		else
			*(volatile u32*)&rw->lock = ~0U;
	}

	return (val == 0);
}

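/* Read trylock uses the same helper convention as above, but
 * ___rw_read_try reports success in %o0, which is why "res" is pinned
 * to that register.
 */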
static inline int __arch_read_trylock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	register int res asm("o0");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_try\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: "=r" (res)
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	return res;
}

#define arch_read_trylock(lock) \
({	unsigned long flags; \
	int res; \
	local_irq_save(flags); \
	res = __arch_read_trylock(lock); \
	local_irq_restore(flags); \
	res; \
})

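/* The _flags variants ignore the caller's saved flags and simply take
 * the lock; the relax hooks just pause the CPU between spin attempts.
 */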
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_read_lock_flags(rw, flags)   arch_read_lock(rw)
#define arch_write_lock_flags(rw, flags)  arch_write_lock(rw)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

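/* Unlocked-state tests on the rwlock word: reading is possible when no
 * writer owns (or is updating) the wlock byte, writing only when both
 * the wlock byte and the reader count are zero.
 */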
#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
#define arch_write_can_lock(rw) (!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */