signals: kill the awful task_rq_unlock_wait() hack
Now that task->signal can't go away we can revert the horrible hack
added by ad474caca3 ("fix for account_group_exec_runtime(), make sure
->signal can't be freed under rq->lock").
And we can do more cleanups in sched_stats.h/posix-cpu-timers.c later.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Alan Cox <alan@linux.intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Acked-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
	
	
parent 4ada856fb0
commit b7b8ff6373

3 changed files with 0 additions and 14 deletions
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -268,7 +268,6 @@ extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern int runqueue_is_locked(int cpu);
-extern void task_rq_unlock_wait(struct task_struct *p);
 
 extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -145,11 +145,6 @@ static void __exit_signal(struct task_struct *tsk)
 	if (sig) {
 		flush_sigqueue(&sig->shared_pending);
 		taskstats_tgid_free(sig);
-		/*
-		 * Make sure ->signal can't go away under rq->lock,
-		 * see account_group_exec_runtime().
-		 */
-		task_rq_unlock_wait(tsk);
 		tty_kref_put(tty);
 	}
 }
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -969,14 +969,6 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
-void task_rq_unlock_wait(struct task_struct *p)
-{
-	struct rq *rq = task_rq(p);
-
-	smp_mb(); /* spin-unlock-wait is not a full memory barrier */
-	raw_spin_unlock_wait(&rq->lock);
-}
-
 static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
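
For context on what the removed helper did: per the deleted comment in __exit_signal(),
account_group_exec_runtime() dereferences ->signal while holding rq->lock, so before this
series the exiting path had to wait for rq->lock to be dropped before ->signal could be
freed. Below is a minimal, self-contained userspace sketch of that ordering using C11
atomics and pthreads (build with e.g. "cc -std=c11 -pthread"). The names (struct task,
rq_lock(), rq_unlock_wait(), tick_thread(), exit_signal()) are illustrative stand-ins,
not the kernel's implementation.

	/* Illustration only -- NOT kernel code. The "exiting" side clears the
	 * pointer, issues a full barrier, then waits for the lock to be free
	 * before freeing, so a lock holder that already loaded the old pointer
	 * cannot still be using freed memory. */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdlib.h>

	struct signal_struct { long sum_exec_runtime; };

	struct task {
		atomic_bool rq_locked;                 /* stands in for rq->lock     */
		struct signal_struct *_Atomic signal;  /* stands in for task->signal */
	};

	static void rq_lock(struct task *t)
	{
		bool unlocked = false;
		while (!atomic_compare_exchange_weak(&t->rq_locked, &unlocked, true))
			unlocked = false;          /* spin until we take the lock */
	}

	static void rq_unlock(struct task *t)
	{
		atomic_store(&t->rq_locked, false);
	}

	/* analogue of the removed task_rq_unlock_wait() */
	static void rq_unlock_wait(struct task *t)
	{
		atomic_thread_fence(memory_order_seq_cst); /* the smp_mb() in the hack */
		while (atomic_load(&t->rq_locked))
			;                          /* wait for the holder to drop it */
	}

	/* analogue of account_group_exec_runtime(): uses ->signal under rq->lock */
	static void *tick_thread(void *arg)
	{
		struct task *t = arg;

		for (int i = 0; i < 1000000; i++) {
			rq_lock(t);
			struct signal_struct *sig = atomic_load(&t->signal);
			if (sig)
				sig->sum_exec_runtime++;
			rq_unlock(t);
		}
		return NULL;
	}

	/* analogue of the old __exit_signal() path */
	static void exit_signal(struct task *t)
	{
		struct signal_struct *sig = atomic_exchange(&t->signal, NULL);

		rq_unlock_wait(t);  /* what task_rq_unlock_wait(tsk) provided */
		free(sig);          /* no rq->lock holder can still be using it */
	}

	int main(void)
	{
		struct task t = { .rq_locked = false };
		pthread_t tick;

		atomic_store(&t.signal, calloc(1, sizeof(struct signal_struct)));
		pthread_create(&tick, NULL, tick_thread, &t);
		exit_signal(&t);
		pthread_join(tick, NULL);
		return 0;
	}

With ->signal now pinned for the lifetime of the task, that wait is no longer needed,
which is exactly what this commit deletes.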