hrtimer: Make offset update smarter
On every tick/hrtimer interrupt we update the offset variables of the
clock bases. That's silly because these offsets change very seldom.

Add a sequence counter to the time keeping code which keeps track of
the offset updates (clock_was_set()). Have a sequence cache in the
hrtimer cpu bases to evaluate whether the offsets must be updated or
not. This allows us later to avoid pointless cacheline pollution.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: John Stultz <john.stultz@linaro.org>
Link: http://lkml.kernel.org/r/20150414203501.132820245@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: John Stultz <john.stultz@linaro.org>
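The mechanism in a nutshell, before the diff: the timekeeping core bumps a sequence counter whenever the clock is set, and each hrtimer cpu base caches the last sequence number it has seen, so the hot interrupt path only copies the offsets when that cache is stale. The sketch below is an illustrative, self-contained user-space C program of that caching pattern; all names are invented for the example (they are not kernel APIs), and the locking the kernel does under the timekeeper seqcount is deliberately left out.

#include <stdio.h>

/* Stand-in for the timekeeper: offsets change rarely, seq counts the changes. */
struct timekeeper_sketch {
	unsigned int clock_was_set_seq;
	long offs_real;
	long offs_boot;
};

/* Stand-in for a per-cpu hrtimer base: caches the last seen sequence number. */
struct cpu_base_sketch {
	unsigned int clock_was_set_seq;
	long offs_real;
	long offs_boot;
};

/* Rare path: a clock_was_set()-style event updates offsets and bumps the seq. */
static void clock_was_set_sketch(struct timekeeper_sketch *tk, long real, long boot)
{
	tk->offs_real = real;
	tk->offs_boot = boot;
	tk->clock_was_set_seq++;
}

/* Hot path: copy the offsets only if the cached sequence number is stale. */
static void update_base_sketch(struct cpu_base_sketch *base,
			       const struct timekeeper_sketch *tk)
{
	if (base->clock_was_set_seq != tk->clock_was_set_seq) {
		base->clock_was_set_seq = tk->clock_was_set_seq;
		base->offs_real = tk->offs_real;
		base->offs_boot = tk->offs_boot;
	}
	/* else: nothing changed, the cachelines holding the offsets stay clean */
}

int main(void)
{
	struct timekeeper_sketch tk = { 0, 0, 0 };
	struct cpu_base_sketch base = { 0, 0, 0 };

	clock_was_set_sketch(&tk, 1000, 2000);
	update_base_sketch(&base, &tk);		/* copies the new offsets */
	update_base_sketch(&base, &tk);		/* no-op: sequence unchanged */
	printf("offs_real=%ld offs_boot=%ld seq=%u\n",
	       base.offs_real, base.offs_boot, base.clock_was_set_seq);
	return 0;
}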
commit 868a3e915f
parent 21d6d52a1b

5 changed files with 26 additions and 13 deletions

include/linux/hrtimer.h

@@ -163,7 +163,7 @@ enum  hrtimer_base_type {
  *			and timers
  * @cpu:		cpu number
  * @active_bases:	Bitfield to mark bases with active timers
- * @clock_was_set:	Indicates that clock was set from irq context.
+ * @clock_was_set_seq:	Sequence counter of clock was set events
  * @expires_next:	absolute time of the next event which was scheduled
  *			via clock_set_next_event()
  * @in_hrtirq:		hrtimer_interrupt() is currently executing
@@ -179,7 +179,7 @@ struct hrtimer_cpu_base {
 	raw_spinlock_t			lock;
 	unsigned int			cpu;
 	unsigned int			active_bases;
-	unsigned int			clock_was_set;
+	unsigned int			clock_was_set_seq;
 #ifdef CONFIG_HIGH_RES_TIMERS
 	ktime_t				expires_next;
 	int				in_hrtirq;

include/linux/timekeeper_internal.h

@@ -49,6 +49,7 @@ struct tk_read_base {
  * @offs_boot:		Offset clock monotonic -> clock boottime
  * @offs_tai:		Offset clock monotonic -> clock tai
  * @tai_offset:		The current UTC to TAI offset in seconds
+ * @clock_was_set_seq:	The sequence number of clock was set events
  * @raw_time:		Monotonic raw base time in timespec64 format
  * @cycle_interval:	Number of clock cycles in one NTP interval
  * @xtime_interval:	Number of clock shifted nano seconds in one NTP
@@ -85,6 +86,7 @@ struct timekeeper {
 	ktime_t			offs_boot;
 	ktime_t			offs_tai;
 	s32			tai_offset;
+	unsigned int		clock_was_set_seq;
 	struct timespec64	raw_time;
 
 	/* The following members are for timekeeping internal use */

kernel/time/hrtimer.c

@@ -451,7 +451,8 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
 	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
 	ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
 
-	return ktime_get_update_offsets_now(offs_real, offs_boot, offs_tai);
+	return ktime_get_update_offsets_now(&base->clock_was_set_seq,
+					    offs_real, offs_boot, offs_tai);
 }
 
 /* High resolution timer related functions */

kernel/time/timekeeping.c

@@ -602,6 +602,9 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
 
 	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
 	update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);
+
+	if (action & TK_CLOCK_WAS_SET)
+		tk->clock_was_set_seq++;
 }
 
 /**
@@ -1927,15 +1930,19 @@ void do_timer(unsigned long ticks)
 
 /**
  * ktime_get_update_offsets_now - hrtimer helper
+ * @cwsseq:	pointer to check and store the clock was set sequence number
  * @offs_real:	pointer to storage for monotonic -> realtime offset
  * @offs_boot:	pointer to storage for monotonic -> boottime offset
  * @offs_tai:	pointer to storage for monotonic -> clock tai offset
  *
- * Returns current monotonic time and updates the offsets
+ * Returns current monotonic time and updates the offsets if the
+ * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
+ * different.
  *
  * Called from hrtimer_interrupt() or retrigger_next_event()
  */
-ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
-							ktime_t *offs_tai)
+ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
+				     ktime_t *offs_boot, ktime_t *offs_tai)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
 	unsigned int seq;
@@ -1947,10 +1954,12 @@ ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
 
 		base = tk->tkr_mono.base;
 		nsecs = timekeeping_get_ns(&tk->tkr_mono);
 
-		*offs_real = tk->offs_real;
-		*offs_boot = tk->offs_boot;
-		*offs_tai = tk->offs_tai;
+		if (*cwsseq != tk->clock_was_set_seq) {
+			*cwsseq = tk->clock_was_set_seq;
+			*offs_real = tk->offs_real;
+			*offs_boot = tk->offs_boot;
+			*offs_tai = tk->offs_tai;
+		}
 	} while (read_seqcount_retry(&tk_core.seq, seq));
 
 	return ktime_add_ns(base, nsecs);

kernel/time/timekeeping.h

@@ -3,9 +3,10 @@
 /*
  * Internal interfaces for kernel/time/
  */
-extern ktime_t ktime_get_update_offsets_now(ktime_t *offs_real,
-						ktime_t *offs_boot,
-						ktime_t *offs_tai);
+extern ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq,
+					    ktime_t *offs_real,
+					    ktime_t *offs_boot,
+					    ktime_t *offs_tai);
 
 extern int timekeeping_valid_for_hres(void);
 extern u64 timekeeping_max_deferment(void);