sched: Don't call task_group() too many times in set_task_rq()
It improves performance, especially if autogroup is enabled. The size of
set_task_rq() was 0x180 and now it is 0xa0.

Signed-off-by: Andrew Vagin <avagin@openvz.org>
Acked-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1321020240-3874331-1-git-send-email-avagin@openvz.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f4d6f6c264
commit a3e5d1091c
1 changed file with 8 additions and 4 deletions
@@ -793,14 +793,18 @@ static inline struct task_group *task_group(struct task_struct *p)
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 {
+#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
+	struct task_group *tg = task_group(p);
+#endif
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
-	p->se.parent = task_group(p)->se[cpu];
+	p->se.cfs_rq = tg->cfs_rq[cpu];
+	p->se.parent = tg->se[cpu];
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
-	p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
-	p->rt.parent = task_group(p)->rt_se[cpu];
+	p->rt.rt_rq  = tg->rt_rq[cpu];
+	p->rt.parent = tg->rt_se[cpu];
 #endif
 }
 
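The hunk above caches the result of task_group(p) in a local tg instead of calling it once per assignment; with autogroup enabled the lookup does extra work on every call, so hoisting it is what shrinks the function. A minimal userspace sketch of the same pattern, using hypothetical lookup_group() and struct group/struct task stand-ins rather than the kernel's types:

#include <stdio.h>

/*
 * Hypothetical stand-ins for the scheduler structures; not the kernel's
 * real types or functions.
 */
struct group {
	int cfs_rq[4];
	int rt_rq[4];
};

struct task {
	struct group *grp;
	int cfs;
	int rt;
};

/* Stand-in for task_group(); imagine the lookup is non-trivial (autogroup). */
static struct group *lookup_group(struct task *t)
{
	return t->grp;
}

/* Before: the lookup runs once per assignment. */
static void set_task_rq_before(struct task *t, int cpu)
{
	t->cfs = lookup_group(t)->cfs_rq[cpu];
	t->rt  = lookup_group(t)->rt_rq[cpu];
}

/* After: hoist the lookup into a local, as the patch does with 'tg'. */
static void set_task_rq_after(struct task *t, int cpu)
{
	struct group *tg = lookup_group(t);

	t->cfs = tg->cfs_rq[cpu];
	t->rt  = tg->rt_rq[cpu];
}

int main(void)
{
	struct group g = { { 1, 2, 3, 4 }, { 5, 6, 7, 8 } };
	struct task t = { &g, 0, 0 };

	set_task_rq_before(&t, 1);
	printf("before: cfs=%d rt=%d\n", t.cfs, t.rt);

	set_task_rq_after(&t, 2);
	printf("after:  cfs=%d rt=%d\n", t.cfs, t.rt);
	return 0;
}

Because the real lookup reads shared state, the compiler generally cannot prove that repeated calls return the same value and fold them itself; caching the result by hand removes the duplicated work.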