 bca7a5a049
			
		
	
	
	bca7a5a049
	
	
	
		
			
			Remove the majority of cache flushing calls from the individual platform files. This is now handled by the core code. Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
		
			
				
	
	
		
			111 lines
		
	
	
	
		
			2.3 KiB
			
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			111 lines
		
	
	
	
		
			2.3 KiB
			
		
	
	
	
		
			C
		
	
	
	
	
	
| /*
 | |
|  *  linux/arch/arm/mach-realview/hotplug.c
 | |
|  *
 | |
|  *  Copyright (C) 2002 ARM Ltd.
 | |
|  *  All Rights Reserved
 | |
|  *
 | |
|  * This program is free software; you can redistribute it and/or modify
 | |
|  * it under the terms of the GNU General Public License version 2 as
 | |
|  * published by the Free Software Foundation.
 | |
|  */
 | |
| #include <linux/kernel.h>
 | |
| #include <linux/errno.h>
 | |
| #include <linux/smp.h>
 | |
| 
 | |
| #include <asm/cp15.h>
 | |
| #include <asm/smp_plat.h>
 | |
| 
 | |
/*
 * Take this CPU out of the coherent domain in preparation for WFI.
 * Must be called with interrupts disabled; paired with
 * cpu_leave_lowpower() on wakeup.
 */
static inline void cpu_enter_lowpower(void)
{
	unsigned int v;

	asm volatile(
	/* Invalidate the entire I-cache (CP15 c7,c5,0, ICIALLU). */
	"	mcr	p15, 0, %1, c7, c5, 0\n"
	/* Data synchronization barrier (CP15 c7,c10,4) so prior
	 * memory accesses complete before we leave coherency. */
	"	mcr	p15, 0, %1, c7, c10, 4\n"
	/*
	 * Turn off coherency: clear bit 5 of the auxiliary control
	 * register (CP15 c1,c0,1) — the SMP/coherency enable bit on
	 * this platform's cores — then disable the D-cache by
	 * clearing the C bit (CR_C) in the system control register
	 * (CP15 c1,c0,0).
	 */
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	bic	%0, %0, #0x20\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	"	mrc	p15, 0, %0, c1, c0, 0\n"
	"	bic	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	  : "=&r" (v)
	  : "r" (0), "Ir" (CR_C)
	  : "cc");
}
 | |
| 
 | |
/*
 * Undo cpu_enter_lowpower(): re-enable the D-cache and rejoin the
 * coherent domain, in the reverse order of how they were disabled.
 */
static inline void cpu_leave_lowpower(void)
{
	unsigned int v;

	/* Set the C bit (CR_C) in the system control register
	 * (CP15 c1,c0,0) to turn the D-cache back on, then set bit 5
	 * of the auxiliary control register (CP15 c1,c0,1) to restore
	 * SMP coherency. */
	asm volatile(	"mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, #0x20\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C)
	  : "cc");
}
 | |
| 
 | |
/*
 * Park the dying CPU in WFI until it is woken for re-plug.
 *
 * @cpu:      logical CPU number being offlined
 * @spurious: incremented for every wakeup that is not a proper
 *            release (reported by the caller once we resume)
 *
 * Returns only when pen_release matches this CPU's hardware id,
 * i.e. when the boot CPU has released us from the holding pen
 * (presumably set by the platform's SMP boot code — see smp_plat.h).
 */
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	/*
	 * there is no power-control hardware on this platform, so all
	 * we can do is put the core into WFI; this is safe as the calling
	 * code will have already disabled interrupts
	 */
	for (;;) {
		/*
		 * here's the WFI: 0xe320f003 is the ARM encoding of the
		 * "wfi" instruction, hand-assembled with .word so it
		 * builds with assemblers that predate the mnemonic
		 */
		asm(".word	0xe320f003\n"
		    :
		    :
		    : "memory", "cc");

		if (pen_release == cpu_logical_map(cpu)) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
 | |
| 
 | |
| /*
 | |
|  * platform-specific code to shutdown a CPU
 | |
|  *
 | |
|  * Called with IRQs disabled
 | |
|  */
 | |
| void __ref realview_cpu_die(unsigned int cpu)
 | |
| {
 | |
| 	int spurious = 0;
 | |
| 
 | |
| 	/*
 | |
| 	 * we're ready for shutdown now, so do it
 | |
| 	 */
 | |
| 	cpu_enter_lowpower();
 | |
| 	platform_do_lowpower(cpu, &spurious);
 | |
| 
 | |
| 	/*
 | |
| 	 * bring this CPU back into the world of cache
 | |
| 	 * coherency, and then restore interrupts
 | |
| 	 */
 | |
| 	cpu_leave_lowpower();
 | |
| 
 | |
| 	if (spurious)
 | |
| 		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
 | |
| }
 |