Create a new wakeup preemption mode: preempt towards tasks that run
shorter on average. It sets the next buddy to make sure we actually
run the task we preempted for.
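
As a rough standalone sketch (not the actual patch), the decision boils
down to comparing a decaying average of how long each task runs per
activation; the avg_running field, the update rule and the helpers below
are illustrative assumptions:

/*
 * Sketch of the WAKEUP_RUNNING decision. Field and helper names
 * (avg_running, set_next_buddy) are assumptions for illustration,
 * not lifted from the patch itself.
 */
struct entity {
	unsigned long long	avg_running;	/* decayed avg runtime per activation, ns */
	int			next_buddy;	/* prefer this entity at the next pick */
};

/* One way to keep a decaying average: avg += (sample - avg) / 8. */
static void update_avg_running(struct entity *se, unsigned long long ran_ns)
{
	long long diff = (long long)(ran_ns - se->avg_running);

	se->avg_running += diff >> 3;
}

/* Mark the woken entity so the next pick actually selects it. */
static void set_next_buddy(struct entity *se)
{
	se->next_buddy = 1;
}

/* Return 1 when the waking task should preempt the current one. */
static int wakeup_running_preempt(struct entity *curr, struct entity *woken)
{
	/* Preempt only towards tasks that run shorter on average. */
	if (woken->avg_running < curr->avg_running) {
		set_next_buddy(woken);
		return 1;
	}
	return 0;
}

Setting the next buddy is what guarantees the preemption is not wasted:
the woken task is preferred at the next pick instead of merely winning a
reschedule.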
Test results:
 root@twins:~# while :; do :; done &
 [1] 6537
 root@twins:~# while :; do :; done &
 [2] 6538
 root@twins:~# while :; do :; done &
 [3] 6539
 root@twins:~# while :; do :; done &
 [4] 6540
 root@twins:/home/peter# ./latt -c4 sleep 4
 Entries: 48 (clients=4)
 Averages:
 ------------------------------
        Max          4750 usec
        Avg           497 usec
        Stdev         737 usec
 root@twins:/home/peter# echo WAKEUP_RUNNING > /debug/sched_features
 root@twins:/home/peter# ./latt -c4 sleep 4
 Entries: 48 (clients=4)
 Averages:
 ------------------------------
        Max            14 usec
        Avg             5 usec
        Stdev           3 usec
Disabled by default - needs more testing.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
LKML-Reference: <new-submission>
/*
 * Disregards a certain amount of sleep time (sched_latency_ns) and
 * considers the task to be running during that period. This gives it
 * a service deficit on wakeup, allowing it to run sooner.
 */
SCHED_FEAT(FAIR_SLEEPERS, 1)

/*
 * Only give sleepers 50% of their service deficit. This allows
 * them to run sooner, but does not allow tons of sleepers to
 * rip the spread apart.
 */
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)

/*
 * By not normalizing the sleep time, heavy tasks get an effectively
 * longer period, and lighter tasks an effectively shorter period,
 * during which they are considered running.
 */
SCHED_FEAT(NORMALIZED_SLEEPER, 0)

/*
 * Place new tasks ahead so that they do not starve already running
 * tasks.
 */
SCHED_FEAT(START_DEBIT, 1)

/*
 * Should wakeups try to preempt running tasks.
 */
SCHED_FEAT(WAKEUP_PREEMPT, 1)

/*
 * Compute wakeup_gran based on task behaviour, clipped to
 * [0, sched_wakeup_gran_ns].
 */
SCHED_FEAT(ADAPTIVE_GRAN, 1)

/*
 * When converting the wakeup granularity to virtual time, do it such
 * that heavier tasks preempting a lighter task have an edge.
 */
SCHED_FEAT(ASYM_GRAN, 1)

/*
 * Always wakeup-preempt SYNC wakeups, see SYNC_WAKEUPS.
 */
SCHED_FEAT(WAKEUP_SYNC, 0)

/*
 * Wakeup preempt based on task behaviour. Tasks that do not overlap
 * don't get preempted.
 */
SCHED_FEAT(WAKEUP_OVERLAP, 0)

/*
 * Wakeup preemption towards tasks that run short.
 */
SCHED_FEAT(WAKEUP_RUNNING, 0)

/*
 * Use the SYNC wakeup hint; pipes and the like use this to indicate
 * the remote end is likely to consume the data we just wrote, and
 * therefore has a cache benefit from being placed on the same cpu, see
 * also AFFINE_WAKEUPS.
 */
SCHED_FEAT(SYNC_WAKEUPS, 1)

/*
 * Based on load and program behaviour, see if it makes sense to place
 * a newly woken task on the same cpu as the task that woke it --
 * improves cache locality. Typically used with SYNC wakeups as
 * generated by pipes and the like, see also SYNC_WAKEUPS.
 */
SCHED_FEAT(AFFINE_WAKEUPS, 1)

/*
 * Weaken the SYNC hint based on overlap.
 */
SCHED_FEAT(SYNC_LESS, 1)

/*
 * Add a SYNC hint based on overlap.
 */
SCHED_FEAT(SYNC_MORE, 0)

/*
 * Prefer to schedule the task we woke last (assuming it failed
 * wakeup-preemption), since it's likely going to consume data we
 * touched; increases cache locality.
 */
SCHED_FEAT(NEXT_BUDDY, 0)

/*
 * Prefer to schedule the task that ran last (when we did
 * wake-preempt) as it will likely touch the same data; increases
 * cache locality.
 */
SCHED_FEAT(LAST_BUDDY, 1)

/*
 * Consider buddies to be cache hot; decreases the likelihood of a
 * cache buddy being migrated away, increasing cache locality.
 */
SCHED_FEAT(CACHE_HOT_BUDDY, 1)

/*
 * Use arch-dependent cpu power functions.
 */
SCHED_FEAT(ARCH_POWER, 0)

SCHED_FEAT(HRTICK, 0)
SCHED_FEAT(DOUBLE_TICK, 0)
SCHED_FEAT(LB_BIAS, 1)
SCHED_FEAT(LB_SHARES_UPDATE, 1)
SCHED_FEAT(ASYM_EFF_LOAD, 1)

/*
 * Spin-wait on mutex acquisition when the mutex owner is running on
 * another cpu -- assumes that when the owner is running, it will soon
 * release the lock. Decreases scheduling overhead.
 */
SCHED_FEAT(OWNER_SPIN, 1)
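
For context, a features file like the above is typically consumed by
including it twice with different SCHED_FEAT() definitions: once to
build an enum of feature indices, once to build the default bitmask.
A minimal sketch of that pattern, modelled on the kernel/sched.c of
this era (treat the exact names as assumptions):

/* First pass: build an enum of feature indices. */
#define SCHED_FEAT(name, enabled)	__SCHED_FEAT_##name,
enum {
#include "sched_features.h"
};
#undef SCHED_FEAT

/* Second pass: build the default bitmask from the 'enabled' values. */
#define SCHED_FEAT(name, enabled)	(1UL << __SCHED_FEAT_##name) * enabled |
const unsigned int sysctl_sched_features =
#include "sched_features.h"
	0;
#undef SCHED_FEAT

/*
 * Run-time test used throughout the scheduler, e.g.
 * if (sched_feat(WAKEUP_RUNNING)) ...
 */
#define sched_feat(x)	(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

Writing a feature name to /debug/sched_features, as in the transcript
above, simply flips the corresponding bit in sysctl_sched_features.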