This is a bug fix for using physical arch timers when the arch_timer_use_virtual boolean is false. It restores the arch_counter_get_cntpct() function after its removal in commit 0d651e4e ("clocksource: arch_timer: use virtual counters"). We need this on certain ARMv7 systems which are architected like this: * The firmware doesn't know and doesn't care about hypervisor mode and we don't want to add the complexity of a hypervisor there. * The firmware isn't involved in SMP bringup or resume. * The arch timers come up with an uninitialized offset between the virtual and physical counters. Each core gets a different random offset. * The device boots in "Secure SVC" mode. * Nothing has touched the reset value of CNTHCTL.PL1PCEN or CNTHCTL.PL1PCTEN (both default to 1 at reset). One example of such a system is RK3288, where it is much simpler to use the physical counter, since nobody is managing the offset and each time a core goes down and comes back up it will get reinitialized to some other random value. Fixes: 0d651e4e65 ("clocksource: arch_timer: use virtual counters") Cc: stable@vger.kernel.org Signed-off-by: Sonny Rao <sonnyrao@chromium.org> Acked-by: Catalin Marinas <catalin.marinas@arm.com> Acked-by: Daniel Lezcano <daniel.lezcano@linaro.org> Signed-off-by: Olof Johansson <olof@lixom.net>
		
			
				
	
	
		
			113 lines
		
	
	
	
		
			2.4 KiB
			
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			113 lines
		
	
	
	
		
			2.4 KiB
			
		
	
	
	
		
			C
		
	
	
	
	
	
#ifndef __ASMARM_ARCH_TIMER_H
 | 
						|
#define __ASMARM_ARCH_TIMER_H
 | 
						|
 | 
						|
#include <asm/barrier.h>
 | 
						|
#include <asm/errno.h>
 | 
						|
#include <linux/clocksource.h>
 | 
						|
#include <linux/init.h>
 | 
						|
#include <linux/types.h>
 | 
						|
 | 
						|
#include <clocksource/arm_arch_timer.h>
 | 
						|
 | 
						|
#ifdef CONFIG_ARM_ARCH_TIMER
 | 
						|
int arch_timer_arch_init(void);
 | 
						|
 | 
						|
/*
 | 
						|
 * These register accessors are marked inline so the compiler can
 | 
						|
 * nicely work out which register we want, and chuck away the rest of
 | 
						|
 * the code. At least it does so with a recent GCC (4.6.3).
 | 
						|
 */
 | 
						|
/*
 * arch_timer_reg_write_cp15 - write an arch timer CP15 control register
 * @access: ARCH_TIMER_PHYS_ACCESS or ARCH_TIMER_VIRT_ACCESS, selecting
 *          the physical or the virtual timer bank
 * @reg:    which register to write (ARCH_TIMER_REG_CTRL or
 *          ARCH_TIMER_REG_TVAL)
 * @val:    value to write
 *
 * Marked __always_inline so the compiler can resolve @access/@reg at
 * each call site and emit just the single mcr needed (see the file
 * comment above).  Unhandled access/reg combinations are silently
 * ignored.  The trailing isb() ensures the register write is complete
 * before any subsequent instruction executes.
 */
static __always_inline
void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTP_CTL: physical timer control register */
			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			/* CNTP_TVAL: physical timer value register */
			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
			break;
		}
	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTV_CTL: virtual timer control register */
			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			/* CNTV_TVAL: virtual timer value register */
			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
			break;
		}
	}

	/* Synchronize: make the write visible before continuing. */
	isb();
}
 | 
						|
 | 
						|
/*
 * arch_timer_reg_read_cp15 - read an arch timer CP15 control register
 * @access: ARCH_TIMER_PHYS_ACCESS or ARCH_TIMER_VIRT_ACCESS, selecting
 *          the physical or the virtual timer bank
 * @reg:    which register to read (ARCH_TIMER_REG_CTRL or
 *          ARCH_TIMER_REG_TVAL)
 *
 * Returns the register value, or 0 for an unhandled access/reg
 * combination.  Like the write accessor, this is __always_inline so
 * constant propagation leaves only the one mrc instruction needed.
 */
static __always_inline
u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
	u32 val = 0;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTP_CTL: physical timer control register */
			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			/* CNTP_TVAL: physical timer value register */
			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
			break;
		}
	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTV_CTL: virtual timer control register */
			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			/* CNTV_TVAL: virtual timer value register */
			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
			break;
		}
	}

	return val;
}
 | 
						|
 | 
						|
/*
 * arch_timer_get_cntfrq - read CNTFRQ, the counter frequency register.
 *
 * Returns the system counter frequency in Hz as programmed by
 * firmware/boot code (NOTE(review): firmware is expected to have set
 * CNTFRQ; an unprogrammed register would read back an arbitrary value).
 */
static inline u32 arch_timer_get_cntfrq(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
	return val;
}
 | 
						|
 | 
						|
/*
 * arch_counter_get_cntpct - read CNTPCT, the 64-bit physical counter.
 *
 * Restored (see commit log above) for ARMv7 systems such as RK3288
 * where the virtual/physical counter offset is never initialized by
 * firmware and each core resumes with a random offset, making the
 * physical counter the only coherent time base.
 *
 * The isb() keeps the counter read from being speculated ahead of
 * prior instructions, so the value reflects program order.
 */
static inline u64 arch_counter_get_cntpct(void)
{
	u64 cval;

	isb();
	/* mrrc opc1=0 on c14 reads CNTPCT into a register pair. */
	asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}
 | 
						|
 | 
						|
/*
 * arch_counter_get_cntvct - read CNTVCT, the 64-bit virtual counter
 * (the physical counter minus the per-core virtual offset CNTVOFF).
 *
 * The isb() keeps the counter read from being speculated ahead of
 * prior instructions, so the value reflects program order.
 */
static inline u64 arch_counter_get_cntvct(void)
{
	u64 cval;

	isb();
	/* mrrc opc1=1 on c14 reads CNTVCT into a register pair. */
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}
 | 
						|
 | 
						|
/*
 * arch_timer_get_cntkctl - read CNTKCTL, the timer kernel control
 * register, which gates user-space (PL0) access to the counters and
 * timers.
 */
static inline u32 arch_timer_get_cntkctl(void)
{
	u32 cntkctl;
	asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
	return cntkctl;
}
 | 
						|
 | 
						|
/*
 * arch_timer_set_cntkctl - write CNTKCTL to configure user-space (PL0)
 * access to the counters and timers.
 *
 * NOTE(review): no isb() follows this write here; callers appear
 * responsible for any required synchronization — confirm at call sites.
 */
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
	asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
}
 | 
						|
 | 
						|
#endif
 | 
						|
 | 
						|
#endif
 |