The only user of the cycle_last validation is the x86 TSC. In order to
provide NMI-safe accessor functions for clock monotonic and
monotonic_raw, we need to do that validation in the core code.
We can't apply the TSC specific

    if (now < cycle_last)
            now = cycle_last;

check to other clocksources, because they legitimately wrap around.
The TSC, however, uses CLOCKSOURCE_MASK(64), which does not mask out
anything, so if now is less than cycle_last the subtraction yields a
negative result when interpreted as s64. We can therefore check for
that in clocksource_delta() and return 0 for that case.
Implement this check and enable it for x86.
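For illustration only, a minimal userspace sketch of the arithmetic
(demo_delta() and the main() driver are purely illustrative, with plain
uint64_t standing in for the kernel's cycle_t): with a full 64-bit mask,
a counter read that is behind cycle_last wraps to a huge unsigned value
whose signed interpretation is negative, and the validated delta clamps
it to 0.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's cycle_t. */
typedef uint64_t cycle_t;

/* Same sign check as the validated clocksource_delta():
 * a delta that is negative when read as a signed 64-bit
 * value is clamped to 0. */
static cycle_t demo_delta(cycle_t now, cycle_t last, cycle_t mask)
{
	cycle_t ret = (now - last) & mask;

	return (int64_t) ret > 0 ? ret : 0;
}

int main(void)
{
	cycle_t mask = ~0ULL;		/* CLOCKSOURCE_MASK(64): nothing masked out */
	cycle_t last = 1000, now = 990;	/* counter read behind cycle_last */

	/* Raw delta wraps to 0xfffffffffffffff6, i.e. -10 as s64. */
	printf("raw delta:       0x%llx\n",
	       (unsigned long long)((now - last) & mask));

	/* Validated delta is clamped to 0 instead. */
	printf("validated delta: %llu\n",
	       (unsigned long long)demo_delta(now, last, mask));

	return 0;
}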
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: John Stultz <john.stultz@linaro.org>
#ifndef _TIMEKEEPING_INTERNAL_H
#define _TIMEKEEPING_INTERNAL_H
/*
 * timekeeping debug functions
 */
#include <linux/clocksource.h>
#include <linux/time.h>

#ifdef CONFIG_DEBUG_FS
extern void tk_debug_account_sleep_time(struct timespec64 *t);
#else
#define tk_debug_account_sleep_time(x)
#endif

#ifdef CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE
static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)
{
	cycle_t ret = (now - last) & mask;

	return (s64) ret > 0 ? ret : 0;
}
#else
static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)
{
	return (now - last) & mask;
}
#endif

#endif /* _TIMEKEEPING_INTERNAL_H */
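A call site in the timekeeping core would then compute the elapsed
cycles roughly like this (a hedged sketch; clock and cycle_last are
illustrative locals, only clocksource_delta(), the clocksource read()
hook and its mask field are taken from the code above):

	cycle_t cycle_now, delta;

	cycle_now = clock->read(clock);	/* raw counter read, e.g. the TSC */
	/* with the validation enabled, 0 if the counter appears
	 * to have gone backwards since cycle_last */
	delta = clocksource_delta(cycle_now, cycle_last, clock->mask);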