commit 1232d6132a

    Correct ->dequeue_tree() thinko into sched_class->dequeue_task and
    drop all references to ->task_new() since it is obviously gone.

    Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
    Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
    Cc: Mike Galbraith <efault@gmx.de>
    LKML-Reference: <1300815978-16618-1-git-send-email-bp@amd64.org>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
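For reference, the hooks the message talks about are function pointers in struct sched_class. The abridged sketch below is illustrative rather than the authoritative definition (that lives in include/linux/sched.h); the signatures are inferred from the stop-class implementations further down, and only the members relevant here are shown.

/*
 * Abridged sketch of struct sched_class (illustrative only).
 * The hook is ->dequeue_task -- there is no ->dequeue_tree -- and
 * ->task_new() no longer exists in this structure.
 */
struct sched_class {
	const struct sched_class *next;	/* next-lower class in priority order */

	void (*enqueue_task)(struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task)(struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task)(struct rq *rq);

	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);

	struct task_struct *(*pick_next_task)(struct rq *rq);
	void (*put_prev_task)(struct rq *rq, struct task_struct *prev);

	/* ... remaining hooks omitted ... */
};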
/*
 * stop-task scheduling class.
 *
 * The stop task is the highest priority task in the system; it preempts
 * everything and will be preempted by nothing.
 *
 * See kernel/stop_machine.c
 */

#ifdef CONFIG_SMP
static int
select_task_rq_stop(struct rq *rq, struct task_struct *p,
		    int sd_flag, int flags)
{
	return task_cpu(p); /* stop tasks never migrate */
}
#endif /* CONFIG_SMP */

static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* we're never preempted */
}

static struct task_struct *pick_next_task_stop(struct rq *rq)
{
	struct task_struct *stop = rq->stop;

	if (stop && stop->se.on_rq)
		return stop;

	return NULL;
}

/*
 * The stop task never sits on a class run-queue: pick_next_task_stop()
 * looks at rq->stop directly, so there is nothing to (de)queue here.
 */
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
}

static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
}

static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield, it's pointless. */
}

static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
}

static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void set_curr_task_stop(struct rq *rq)
{
}

static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* it's impossible to change to this class */
}

static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG(); /* how!? what priority? */
}

static unsigned int
get_rr_interval_stop(struct rq *rq, struct task_struct *task)
{
	return 0;
}

/*
 * Simple, special scheduling class for the per-CPU stop tasks:
 */
static const struct sched_class stop_sched_class = {
	.next			= &rt_sched_class,

	.enqueue_task		= enqueue_task_stop,
	.dequeue_task		= dequeue_task_stop,
	.yield_task		= yield_task_stop,

	.check_preempt_curr	= check_preempt_curr_stop,

	.pick_next_task		= pick_next_task_stop,
	.put_prev_task		= put_prev_task_stop,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_stop,
#endif

	.set_curr_task		= set_curr_task_stop,
	.task_tick		= task_tick_stop,

	.get_rr_interval	= get_rr_interval_stop,

	.prio_changed		= prio_changed_stop,
	.switched_to		= switched_to_stop,
};
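How the stop class ends up with absolute priority: the core scheduler walks the sched_class ->next chain from the highest class downward when picking the next task, and sched_class_highest resolves to &stop_sched_class in this tree. The sketch below is illustrative only (the function name is made up for this example); the real loop, with its fair-class fast path, lives in pick_next_task() in kernel/sched.c, and the per-cpu stop thread itself is installed into rq->stop by sched_set_stop_task() from the cpu-stop code in kernel/stop_machine.c.

/*
 * Illustrative sketch of the class walk, not the kernel's actual code.
 * A runnable stop task is returned before any RT, fair or idle task
 * because stop_sched_class sits at the head of the chain.
 */
static struct task_struct *pick_next_task_sketch(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *p;

	for (class = sched_class_highest; class; class = class->next) {
		p = class->pick_next_task(rq);
		if (p)
			return p;
	}

	return NULL;	/* unreachable: the idle class always returns a task */
}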