sched/deadline: Fix stale yield state
When we fail to start the deadline timer in update_curr_dl(), we
forget to clear ->dl_yielded, resulting in wrecked time keeping.
Since the natural place to clear both ->dl_yielded and ->dl_throttled
is in replenish_dl_entity() — both are, after all, waiting for that
event — make it so.
Luckily, since commit 67dfa1b756 ("sched/deadline: Implement
cancel_dl_timer() to use in switched_from_dl()"), the
task_on_rq_queued() condition in dl_task_timer() must be true, so we
can call enqueue_task_dl() unconditionally.
Reported-by: Wanpeng Li <wanpeng.li@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Kirill Tkhai <ktkhai@parallels.com>
Cc: Juri Lelli <juri.lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1416962647-76792-4-git-send-email-wanpeng.li@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
	
	
This commit is contained in:
		
					parent
					
						
							
								a7bebf4887
							
						
					
				
			
			
				commit
				
					
						1019a359d3
					
				
			
		
					 1 changed file with 19 additions and 19 deletions
				
			
		|  | @ -350,6 +350,11 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se, | |||
| 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; | ||||
| 		dl_se->runtime = pi_se->dl_runtime; | ||||
| 	} | ||||
| 
 | ||||
| 	if (dl_se->dl_yielded) | ||||
| 		dl_se->dl_yielded = 0; | ||||
| 	if (dl_se->dl_throttled) | ||||
| 		dl_se->dl_throttled = 0; | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  | @ -536,23 +541,19 @@ again: | |||
| 
 | ||||
| 	sched_clock_tick(); | ||||
| 	update_rq_clock(rq); | ||||
| 	dl_se->dl_throttled = 0; | ||||
| 	dl_se->dl_yielded = 0; | ||||
| 	if (task_on_rq_queued(p)) { | ||||
| 		enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); | ||||
| 		if (dl_task(rq->curr)) | ||||
| 			check_preempt_curr_dl(rq, p, 0); | ||||
| 		else | ||||
| 			resched_curr(rq); | ||||
| 	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); | ||||
| 	if (dl_task(rq->curr)) | ||||
| 		check_preempt_curr_dl(rq, p, 0); | ||||
| 	else | ||||
| 		resched_curr(rq); | ||||
| #ifdef CONFIG_SMP | ||||
| 		/*
 | ||||
| 		 * Queueing this task back might have overloaded rq, | ||||
| 		 * check if we need to kick someone away. | ||||
| 		 */ | ||||
| 		if (has_pushable_dl_tasks(rq)) | ||||
| 			push_dl_task(rq); | ||||
| 	/*
 | ||||
| 	 * Queueing this task back might have overloaded rq, | ||||
| 	 * check if we need to kick someone away. | ||||
| 	 */ | ||||
| 	if (has_pushable_dl_tasks(rq)) | ||||
| 		push_dl_task(rq); | ||||
| #endif | ||||
| 	} | ||||
| unlock: | ||||
| 	raw_spin_unlock(&rq->lock); | ||||
| 
 | ||||
|  | @ -613,10 +614,9 @@ static void update_curr_dl(struct rq *rq) | |||
| 
 | ||||
| 	dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec; | ||||
| 	if (dl_runtime_exceeded(rq, dl_se)) { | ||||
| 		dl_se->dl_throttled = 1; | ||||
| 		__dequeue_task_dl(rq, curr, 0); | ||||
| 		if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted))) | ||||
| 			dl_se->dl_throttled = 1; | ||||
| 		else | ||||
| 		if (unlikely(!start_dl_timer(dl_se, curr->dl.dl_boosted))) | ||||
| 			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH); | ||||
| 
 | ||||
| 		if (!is_leftmost(curr, &rq->dl)) | ||||
|  | @ -853,7 +853,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) | |||
| 	 * its rq, the bandwidth timer callback (which clearly has not | ||||
| 	 * run yet) will take care of this. | ||||
| 	 */ | ||||
| 	if (p->dl.dl_throttled) | ||||
| 	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) | ||||
| 		return; | ||||
| 
 | ||||
| 	enqueue_dl_entity(&p->dl, pi_se, flags); | ||||
|  |  | |||
		Loading…
	
	Add table
		Add a link
		
	
		Reference in a new issue
	
	 Peter Zijlstra
				Peter Zijlstra