signals: make task_struct->signal immutable/refcountable
We have a lot of problems with accessing task_struct->signal, it can "disappear" at any moment. Even current can't use its ->signal safely after exit_notify(). ->siglock helps, but it is not convenient, not always possible, and sometimes it makes sense to use task->signal even after this task has already died.

This patch adds a reference counter, sigcnt, to signal_struct. This reference is owned by task_struct and is dropped in __put_task_struct(). Perhaps it makes sense to export get/put_signal_struct() later, but currently I don't see an immediate reason.

Rename __cleanup_signal() to free_signal_struct() and unexport it. With the previous changes it does nothing except kmem_cache_free().

Change __exit_signal() to not clear/free ->signal; it will be freed when the last reference to any thread in the thread group goes away.

Note:

- when the last thread exits, signal->tty can point to nowhere; see the next patch.

- with or without this patch, signal_struct->count should go away, or at least it should become "int nr_threads" for fs/proc. This will be addressed later.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Alan Cox <alan@linux.intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Acked-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
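The ownership rule this establishes, restated: each task_struct in a thread group holds one sigcnt reference on the shared signal_struct (set to 1 in copy_signal(), incremented for CLONE_THREAD children), and that reference is only dropped in __put_task_struct(), so ->signal stays valid for as long as any reference to any thread does. The standalone C sketch below models that scheme; the names mirror the patch, but the userspace types, malloc/free and C11 atomics are illustrative assumptions, not the kernel code.

/*
 * Standalone sketch of the reference-counting scheme described above.
 * Names (sigcnt, put_signal_struct, free_signal_struct) mirror the
 * patch; the struct layouts, malloc/free and C11 atomics are userspace
 * simplifications, not the kernel implementation.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct signal_struct {
	atomic_int sigcnt;	/* one reference per task_struct in the group */
};

struct task_struct {
	struct signal_struct *signal;
};

static void free_signal_struct(struct signal_struct *sig)
{
	free(sig);		/* kernel: kmem_cache_free(signal_cachep, sig) */
}

static void put_signal_struct(struct signal_struct *sig)
{
	/* the task dropping the last reference frees the structure */
	if (atomic_fetch_sub(&sig->sigcnt, 1) == 1)
		free_signal_struct(sig);
}

/* fork path: the first thread of a group starts with one reference */
static struct signal_struct *copy_signal(void)
{
	struct signal_struct *sig = malloc(sizeof(*sig));

	if (sig)
		atomic_init(&sig->sigcnt, 1);
	return sig;
}

/* CLONE_THREAD path: every additional thread takes its own reference */
static void clone_thread(struct task_struct *parent, struct task_struct *child)
{
	atomic_fetch_add(&parent->signal->sigcnt, 1);
	child->signal = parent->signal;
}

/*
 * __put_task_struct() analogue: ->signal is only released when the
 * task_struct itself goes away, so anyone still holding a task
 * reference can dereference task->signal even after __exit_signal().
 */
static void put_task_struct(struct task_struct *tsk)
{
	put_signal_struct(tsk->signal);
	free(tsk);
}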
parent 4dec2a91fd
commit ea6d290ca3

3 changed files with 17 additions and 11 deletions
include/linux/sched.h
@@ -527,6 +527,7 @@ struct thread_group_cputimer {
  * the locking of signal_struct.
  */
 struct signal_struct {
+	atomic_t		sigcnt;
 	atomic_t		count;
 	atomic_t		live;
 
@@ -2101,7 +2102,6 @@ extern void flush_thread(void);
 extern void exit_thread(void);
 
 extern void exit_files(struct task_struct *);
-extern void __cleanup_signal(struct signal_struct *);
 extern void __cleanup_sighand(struct sighand_struct *);
 
 extern void exit_itimers(struct signal_struct *);
kernel/exit.c
@@ -134,8 +134,6 @@ static void __exit_signal(struct task_struct *tsk)
 	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
 	 */
 	flush_sigqueue(&tsk->pending);
-
-	tsk->signal = NULL;
 	tsk->sighand = NULL;
 	spin_unlock(&sighand->siglock);
 
@@ -150,7 +148,6 @@ static void __exit_signal(struct task_struct *tsk)
 		 */
 		task_rq_unlock_wait(tsk);
 		tty_kref_put(sig->tty);
-		__cleanup_signal(sig);
 	}
 }
 
kernel/fork.c
@@ -165,6 +165,18 @@ void free_task(struct task_struct *tsk)
 }
 EXPORT_SYMBOL(free_task);
 
+static inline void free_signal_struct(struct signal_struct *sig)
+{
+	thread_group_cputime_free(sig);
+	kmem_cache_free(signal_cachep, sig);
+}
+
+static inline void put_signal_struct(struct signal_struct *sig)
+{
+	if (atomic_dec_and_test(&sig->sigcnt))
+		free_signal_struct(sig);
+}
+
 void __put_task_struct(struct task_struct *tsk)
 {
 	WARN_ON(!tsk->exit_state);
@@ -173,6 +185,7 @@ void __put_task_struct(struct task_struct *tsk)
 
 	exit_creds(tsk);
 	delayacct_tsk_free(tsk);
+	put_signal_struct(tsk->signal);
 
 	if (!profile_handoff_task(tsk))
 		free_task(tsk);
@@ -864,6 +877,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	if (!sig)
 		return -ENOMEM;
 
+	atomic_set(&sig->sigcnt, 1);
 	atomic_set(&sig->count, 1);
 	atomic_set(&sig->live, 1);
 	init_waitqueue_head(&sig->wait_chldexit);
@@ -889,12 +903,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	return 0;
 }
 
-void __cleanup_signal(struct signal_struct *sig)
-{
-	thread_group_cputime_free(sig);
-	kmem_cache_free(signal_cachep, sig);
-}
-
 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long new_flags = p->flags;
@@ -1248,6 +1256,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	}
 
 	if (clone_flags & CLONE_THREAD) {
+		atomic_inc(&current->signal->sigcnt);
 		atomic_inc(&current->signal->count);
 		atomic_inc(&current->signal->live);
 		p->group_leader = current->group_leader;
@@ -1294,7 +1303,7 @@ bad_fork_cleanup_mm:
 		mmput(p->mm);
 bad_fork_cleanup_signal:
 	if (!(clone_flags & CLONE_THREAD))
-		__cleanup_signal(p->signal);
+		free_signal_struct(p->signal);
 bad_fork_cleanup_sighand:
 	__cleanup_sighand(p->sighand);
 bad_fork_cleanup_fs: