cgroup: consolidate cgroup_attach_task() and cgroup_attach_proc()
These two functions share most of the code.

Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
commit 081aa458c3
parent bd2953ebbb

3 changed files with 23 additions and 91 deletions
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -693,7 +693,8 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
 					struct cgroup_iter *it);
 void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
 int cgroup_scan_tasks(struct cgroup_scanner *scan);
-int cgroup_attach_task(struct cgroup *, struct task_struct *);
+int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
+		       bool threadgroup);
 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
 
 /*
kernel/cgroup.c (109 lines changed)

--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -59,7 +59,7 @@
 #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
 #include <linux/eventfd.h>
 #include <linux/poll.h>
-#include <linux/flex_array.h> /* used in cgroup_attach_proc */
+#include <linux/flex_array.h> /* used in cgroup_attach_task */
 #include <linux/kthread.h>
 
 #include <linux/atomic.h>
@@ -1943,82 +1943,6 @@ static void cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
 	put_css_set(oldcg);
 }
 
-/**
- * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp'
- * @cgrp: the cgroup the task is attaching to
- * @tsk: the task to be attached
- *
- * Call with cgroup_mutex and threadgroup locked. May take task_lock of
- * @tsk during call.
- */
-int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
-{
-	int retval = 0;
-	struct cgroup_subsys *ss, *failed_ss = NULL;
-	struct cgroup *oldcgrp;
-	struct cgroupfs_root *root = cgrp->root;
-	struct cgroup_taskset tset = { };
-	struct css_set *newcg;
-
-	/* @tsk either already exited or can't exit until the end */
-	if (tsk->flags & PF_EXITING)
-		return -ESRCH;
-
-	/* Nothing to do if the task is already in that cgroup */
-	oldcgrp = task_cgroup_from_root(tsk, root);
-	if (cgrp == oldcgrp)
-		return 0;
-
-	tset.single.task = tsk;
-	tset.single.cgrp = oldcgrp;
-
-	for_each_subsys(root, ss) {
-		if (ss->can_attach) {
-			retval = ss->can_attach(cgrp, &tset);
-			if (retval) {
-				/*
-				 * Remember on which subsystem the can_attach()
-				 * failed, so that we only call cancel_attach()
-				 * against the subsystems whose can_attach()
-				 * succeeded. (See below)
-				 */
-				failed_ss = ss;
-				goto out;
-			}
-		}
-	}
-
-	newcg = find_css_set(tsk->cgroups, cgrp);
-	if (!newcg) {
-		retval = -ENOMEM;
-		goto out;
-	}
-
-	cgroup_task_migrate(cgrp, oldcgrp, tsk, newcg);
-
-	for_each_subsys(root, ss) {
-		if (ss->attach)
-			ss->attach(cgrp, &tset);
-	}
-
-out:
-	if (retval) {
-		for_each_subsys(root, ss) {
-			if (ss == failed_ss)
-				/*
-				 * This subsystem was the one that failed the
-				 * can_attach() check earlier, so we don't need
-				 * to call cancel_attach() against it or any
-				 * remaining subsystems.
-				 */
-				break;
-			if (ss->cancel_attach)
-				ss->cancel_attach(cgrp, &tset);
-		}
-	}
-	return retval;
-}
-
 /**
  * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
  * @from: attach to all cgroups of a given task
@@ -2033,7 +1957,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 	for_each_active_root(root) {
 		struct cgroup *from_cg = task_cgroup_from_root(from, root);
 
-		retval = cgroup_attach_task(from_cg, tsk);
+		retval = cgroup_attach_task(from_cg, tsk, false);
 		if (retval)
 			break;
 	}
|  | @ -2044,21 +1968,22 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) | ||||||
| EXPORT_SYMBOL_GPL(cgroup_attach_task_all); | EXPORT_SYMBOL_GPL(cgroup_attach_task_all); | ||||||
| 
 | 
 | ||||||
| /**
 | /**
 | ||||||
|  * cgroup_attach_proc - attach all threads in a threadgroup to a cgroup |  * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup | ||||||
|  * @cgrp: the cgroup to attach to |  * @cgrp: the cgroup to attach to | ||||||
|  * @leader: the threadgroup leader task_struct of the group to be attached |  * @tsk: the task or the leader of the threadgroup to be attached | ||||||
|  |  * @threadgroup: attach the whole threadgroup? | ||||||
|  * |  * | ||||||
|  * Call holding cgroup_mutex and the group_rwsem of the leader. Will take |  * Call holding cgroup_mutex and the group_rwsem of the leader. Will take | ||||||
|  * task_lock of each thread in leader's threadgroup individually in turn. |  * task_lock of @tsk or each thread in the threadgroup individually in turn. | ||||||
|  */ |  */ | ||||||
| static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader) | int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, | ||||||
|  | 		       bool threadgroup) | ||||||
| { | { | ||||||
| 	int retval, i, group_size; | 	int retval, i, group_size; | ||||||
| 	struct cgroup_subsys *ss, *failed_ss = NULL; | 	struct cgroup_subsys *ss, *failed_ss = NULL; | ||||||
| 	/* guaranteed to be initialized later, but the compiler needs this */ |  | ||||||
| 	struct cgroupfs_root *root = cgrp->root; | 	struct cgroupfs_root *root = cgrp->root; | ||||||
| 	/* threadgroup list cursor and array */ | 	/* threadgroup list cursor and array */ | ||||||
| 	struct task_struct *tsk; | 	struct task_struct *leader = tsk; | ||||||
| 	struct task_and_cgroup *tc; | 	struct task_and_cgroup *tc; | ||||||
| 	struct flex_array *group; | 	struct flex_array *group; | ||||||
| 	struct cgroup_taskset tset = { }; | 	struct cgroup_taskset tset = { }; | ||||||
@@ -2070,7 +1995,10 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	 * group - group_rwsem prevents new threads from appearing, and if
 	 * threads exit, this will just be an over-estimate.
 	 */
-	group_size = get_nr_threads(leader);
+	if (threadgroup)
+		group_size = get_nr_threads(tsk);
+	else
+		group_size = 1;
 	/* flex_array supports very large thread-groups better than kmalloc. */
 	group = flex_array_alloc(sizeof(*tc), group_size, GFP_KERNEL);
 	if (!group)
@@ -2080,7 +2008,6 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	if (retval)
 		goto out_free_group_list;
 
-	tsk = leader;
 	i = 0;
 	/*
 	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
@@ -2109,6 +2036,9 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 		retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
 		BUG_ON(retval != 0);
 		i++;
+
+		if (!threadgroup)
+			break;
 	} while_each_thread(leader, tsk);
 	rcu_read_unlock();
 	/* remember the number of threads in the array for later. */
@@ -2262,9 +2192,10 @@ retry_find_task:
 			put_task_struct(tsk);
 			goto retry_find_task;
 		}
-		ret = cgroup_attach_proc(cgrp, tsk);
-	} else
-		ret = cgroup_attach_task(cgrp, tsk);
+	}
+
+	ret = cgroup_attach_task(cgrp, tsk, threadgroup);
+
 	threadgroup_unlock(tsk);
 
 	put_task_struct(tsk);
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2008,7 +2008,7 @@ static void cpuset_do_move_task(struct task_struct *tsk,
 	struct cgroup *new_cgroup = scan->data;
 
 	cgroup_lock();
-	cgroup_attach_task(new_cgroup, tsk);
+	cgroup_attach_task(new_cgroup, tsk, false);
 	cgroup_unlock();
 }
 
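With the flag folded in, the single-task case is just a threadgroup walk that snapshots one thread and stops. A condensed sketch of the consolidated control flow, paraphrased from the hunks above (error handling and locking elided):

	group_size = threadgroup ? get_nr_threads(tsk) : 1;
	group = flex_array_alloc(sizeof(*tc), group_size, GFP_KERNEL);

	rcu_read_lock();
	do {
		/* snapshot this thread and its current cgroup into @group */
		if (!threadgroup)
			break;		/* single task: stop after @tsk itself */
	} while_each_thread(leader, tsk);
	rcu_read_unlock();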