commit 3a0310eb36
Our uses of inline asm constraints for atomic operations are fairly
wild and varied. We basically need to guarantee the following:
  1. Any instructions with barrier implications
     (load-acquire/store-release) have a "memory" clobber
  2. When performing exclusive accesses, the addressing mode is generated
     using the "Q" constraint
  3. Atomic blocks which use the condition flags have a "cc" clobber
This patch addresses these concerns; as well as fixing the semantics of
the code, it stops GCC complaining about impossible asm constraints. The
resulting constraint pattern is illustrated in the sketch below.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
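
As an illustration of the three rules above, here is a minimal sketch of
an exclusive swap that satisfies them. This is not taken from the patch
and the function name is hypothetical; it mirrors the 64-bit case in the
header below.

/*
 * Hypothetical sketch of the constraint rules:
 *  - ldaxr/stlxr have barrier implications, hence the "memory" clobber;
 *  - the exclusive access to *ptr uses the "+Q" constraint;
 *  - nothing here reads or sets the condition flags, so no "cc" clobber
 *    is required (a cmp inside the block would demand one).
 */
static inline unsigned long example_swap(volatile u64 *ptr, unsigned long x)
{
	unsigned long ret, tmp;

	asm volatile(
	"1:	ldaxr	%0, %2\n"
	"	stlxr	%w1, %3, %2\n"
	"	cbnz	%w1, 1b\n"
		: "=&r" (ret), "=&r" (tmp), "+Q" (*ptr)
		: "r" (x)
		: "memory");

	return ret;
}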
173 lines | 3.7 KiB | C
/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>

#include <asm/barrier.h>

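/*
 * __xchg: atomically swap the value at ptr with x and return the
 * previous value. The load-acquire/store-release exclusive pair gives
 * the swap its barrier semantics (hence the "memory" clobber), and
 * cbnz retries the sequence until the store-exclusive succeeds.
 */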
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long ret, tmp;

	switch (size) {
	case 1:
		asm volatile("//	__xchg1\n"
		"1:	ldaxrb	%w0, %2\n"
		"	stlxrb	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
			: "r" (x)
			: "cc", "memory");
		break;
	case 2:
		asm volatile("//	__xchg2\n"
		"1:	ldaxrh	%w0, %2\n"
		"	stlxrh	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
			: "r" (x)
			: "cc", "memory");
		break;
	case 4:
		asm volatile("//	__xchg4\n"
		"1:	ldaxr	%w0, %2\n"
		"	stlxr	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
			: "r" (x)
			: "cc", "memory");
		break;
	case 8:
		asm volatile("//	__xchg8\n"
		"1:	ldaxr	%0, %2\n"
		"	stlxr	%w1, %3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
			: "r" (x)
			: "cc", "memory");
		break;
	default:
		BUILD_BUG();
	}

	return ret;
}

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

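/*
 * __cmpxchg: if the value at ptr equals old, atomically replace it with
 * new; in either case, return the value that was read. The plain
 * exclusives (ldxr/stxr) carry no barrier implications, so this variant
 * is unordered; the flag-setting cmp is why "cc" is clobbered.
 */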
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval = 0, res;

	switch (size) {
	case 1:
		do {
			asm volatile("// __cmpxchg1\n"
			"	ldxrb	%w1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
			"	stxrb	%w0, %w4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	case 2:
		do {
			asm volatile("// __cmpxchg2\n"
			"	ldxrh	%w1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
			"	stxrh	%w0, %w4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	case 4:
		do {
			asm volatile("// __cmpxchg4\n"
			"	ldxr	%w1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
			"	stxr	%w0, %w4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	case 8:
		do {
			asm volatile("// __cmpxchg8\n"
			"	ldxr	%1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%1, %3\n"
			"	b.ne	1f\n"
			"	stxr	%w0, %4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	default:
		BUILD_BUG();
	}

	return oldval;
}

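/*
 * __cmpxchg_mb: fully ordered variant, bracketing the unordered
 * __cmpxchg with explicit barriers on either side.
 */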
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

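/*
 * cmpxchg() provides the fully ordered semantics expected of the
 * generic API; cmpxchg_local() is the unordered variant.
 */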
#define cmpxchg(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
					  (unsigned long)(o),		\
					  (unsigned long)(n),		\
					  sizeof(*(ptr))))

#define cmpxchg_local(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg((ptr),				\
				       (unsigned long)(o),		\
				       (unsigned long)(n),		\
				       sizeof(*(ptr))))

#endif	/* __ASM_CMPXCHG_H */
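
For context, a hedged sketch of how a caller typically uses the
cmpxchg() macro defined above; the counter helper here is hypothetical
and not part of this patch:

/* Hypothetical caller: lock-free increment built on cmpxchg(). */
static inline unsigned long counter_inc(unsigned long *counter)
{
	unsigned long old, new;

	do {
		old = *counter;		/* snapshot the current value */
		new = old + 1;
		/* retry if another CPU updated *counter in the meantime */
	} while (cmpxchg(counter, old, new) != old);

	return new;
}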