cgroup: use a per-cgroup work for release agent
Instead of using a global work to schedule the release agent on
removable cgroups, use a per-cgroup work to do this, which makes the
code much simpler.

v2: use a dedicated work instead of reusing css->destroy_work. (Tejun)

Signed-off-by: Zefan Li <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
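The pattern adopted here is the kernel's standard embedded per-object
work item: initialize the work_struct once when the object is set up
(INIT_WORK() in init_cgroup_housekeeping()), queue it with
schedule_work() whenever the cgroup becomes releasable, recover the
owning cgroup in the handler with container_of(), and flush it with
cancel_work_sync() before the object is freed. Below is a minimal
stand-alone module sketching that pattern; "struct foo" and all its
names are hypothetical, not the cgroup code itself.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo {
	int id;
	struct work_struct work;	/* one work item per object */
};

static void foo_work_fn(struct work_struct *work)
{
	/* recover the owning object from the embedded work item */
	struct foo *f = container_of(work, struct foo, work);

	pr_info("foo %d: work ran\n", f->id);
}

static struct foo *f;

static int __init foo_init(void)
{
	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return -ENOMEM;

	f->id = 1;
	INIT_WORK(&f->work, foo_work_fn);	/* init once, at setup time */
	schedule_work(&f->work);		/* no-op if already queued */
	return 0;
}

static void __exit foo_exit(void)
{
	/*
	 * Wait for a possibly-running instance before freeing, as the
	 * commit does with cancel_work_sync() in css_free_work_fn().
	 */
	cancel_work_sync(&f->work);
	kfree(f);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

Because each object owns its work item, no global list or lock is
needed to track pending notifications; schedule_work() itself is a
no-op when the work is already queued, which replaces the old
list_empty(&cgrp->release_list) check.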
parent 0c8fc2c121
commit 971ff49355

2 changed files with 33 additions and 79 deletions
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -233,13 +233,6 @@ struct cgroup {
 	 */
 	struct list_head e_csets[CGROUP_SUBSYS_COUNT];
 
-	/*
-	 * Linked list running through all cgroups that can
-	 * potentially be reaped by the release agent. Protected by
-	 * release_list_lock
-	 */
-	struct list_head release_list;
-
 	/*
 	 * list of pidlists, up to two for each namespace (one for procs, one
 	 * for tasks); created on demand.
@@ -249,6 +242,9 @@ struct cgroup {
 
 	/* used to wait for offlining of csses */
 	wait_queue_head_t offline_waitq;
+
+	/* used to schedule release agent */
+	struct work_struct release_agent_work;
 };
 
 #define MAX_CGROUP_ROOT_NAMELEN 64
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -392,12 +392,7 @@ static int notify_on_release(const struct cgroup *cgrp)
 			;						\
 		else
 
-/* the list of cgroups eligible for automatic release. Protected by
- * release_list_lock */
-static LIST_HEAD(release_list);
-static DEFINE_RAW_SPINLOCK(release_list_lock);
 static void cgroup_release_agent(struct work_struct *work);
-static DECLARE_WORK(release_agent_work, cgroup_release_agent);
 static void check_for_release(struct cgroup *cgrp);
 
 /*
@@ -1577,7 +1572,6 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
 	INIT_LIST_HEAD(&cgrp->self.sibling);
 	INIT_LIST_HEAD(&cgrp->self.children);
 	INIT_LIST_HEAD(&cgrp->cset_links);
-	INIT_LIST_HEAD(&cgrp->release_list);
 	INIT_LIST_HEAD(&cgrp->pidlists);
 	mutex_init(&cgrp->pidlist_mutex);
 	cgrp->self.cgroup = cgrp;
@@ -1587,6 +1581,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
 		INIT_LIST_HEAD(&cgrp->e_csets[ssid]);
 
 	init_waitqueue_head(&cgrp->offline_waitq);
+	INIT_WORK(&cgrp->release_agent_work, cgroup_release_agent);
 }
 
 static void init_cgroup_root(struct cgroup_root *root,
@@ -4342,6 +4337,7 @@ static void css_free_work_fn(struct work_struct *work)
 		/* cgroup free path */
 		atomic_dec(&cgrp->root->nr_cgrps);
 		cgroup_pidlist_destroy_all(cgrp);
+		cancel_work_sync(&cgrp->release_agent_work);
 
 		if (cgroup_parent(cgrp)) {
 			/*
@@ -4804,12 +4800,6 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	for_each_css(css, ssid, cgrp)
 		kill_css(css);
 
-	/* CSS_ONLINE is clear, remove from ->release_list for the last time */
-	raw_spin_lock(&release_list_lock);
-	if (!list_empty(&cgrp->release_list))
-		list_del_init(&cgrp->release_list);
-	raw_spin_unlock(&release_list_lock);
-
 	/*
 	 * Remove @cgrp directory along with the base files.  @cgrp has an
 	 * extra ref on its kn.
@@ -5271,25 +5261,9 @@ void cgroup_exit(struct task_struct *tsk)
 
 static void check_for_release(struct cgroup *cgrp)
 {
-	if (cgroup_is_releasable(cgrp) && list_empty(&cgrp->cset_links) &&
-	    !css_has_online_children(&cgrp->self)) {
-		/*
-		 * Control Group is currently removeable. If it's not
-		 * already queued for a userspace notification, queue
-		 * it now
-		 */
-		int need_schedule_work = 0;
-
-		raw_spin_lock(&release_list_lock);
-		if (!cgroup_is_dead(cgrp) &&
-		    list_empty(&cgrp->release_list)) {
-			list_add(&cgrp->release_list, &release_list);
-			need_schedule_work = 1;
-		}
-		raw_spin_unlock(&release_list_lock);
-		if (need_schedule_work)
-			schedule_work(&release_agent_work);
-	}
+	if (cgroup_is_releasable(cgrp) && !cgroup_has_tasks(cgrp) &&
+	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
+		schedule_work(&cgrp->release_agent_work);
 }
 
 /*
@@ -5317,52 +5291,36 @@ static void check_for_release(struct cgroup *cgrp)
  */
 static void cgroup_release_agent(struct work_struct *work)
 {
-	BUG_ON(work != &release_agent_work);
+	struct cgroup *cgrp =
+		container_of(work, struct cgroup, release_agent_work);
+	char *pathbuf = NULL, *agentbuf = NULL, *path;
+	char *argv[3], *envp[3];
+
 	mutex_lock(&cgroup_mutex);
-	raw_spin_lock(&release_list_lock);
-	while (!list_empty(&release_list)) {
-		char *argv[3], *envp[3];
-		int i;
-		char *pathbuf = NULL, *agentbuf = NULL, *path;
-		struct cgroup *cgrp = list_entry(release_list.next,
-						    struct cgroup,
-						    release_list);
-		list_del_init(&cgrp->release_list);
-		raw_spin_unlock(&release_list_lock);
-		pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
-		if (!pathbuf)
-			goto continue_free;
-		path = cgroup_path(cgrp, pathbuf, PATH_MAX);
-		if (!path)
-			goto continue_free;
-		agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
-		if (!agentbuf)
-			goto continue_free;
 
-		i = 0;
-		argv[i++] = agentbuf;
-		argv[i++] = path;
-		argv[i] = NULL;
+	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
+	agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
+	if (!pathbuf || !agentbuf)
+		goto out;
 
-		i = 0;
-		/* minimal command environment */
-		envp[i++] = "HOME=/";
-		envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
-		envp[i] = NULL;
+	path = cgroup_path(cgrp, pathbuf, PATH_MAX);
+	if (!path)
+		goto out;
+
+	argv[0] = agentbuf;
+	argv[1] = path;
+	argv[2] = NULL;
+
+	/* minimal command environment */
+	envp[0] = "HOME=/";
+	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+	envp[2] = NULL;
 
-		/* Drop the lock while we invoke the usermode helper,
-		 * since the exec could involve hitting disk and hence
-		 * be a slow process */
-		mutex_unlock(&cgroup_mutex);
-		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
-		mutex_lock(&cgroup_mutex);
- continue_free:
-		kfree(pathbuf);
-		kfree(agentbuf);
-		raw_spin_lock(&release_list_lock);
-	}
-	raw_spin_unlock(&release_list_lock);
 	mutex_unlock(&cgroup_mutex);
+	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
+out:
+	kfree(agentbuf);
+	kfree(pathbuf);
 }
 
 static int __init cgroup_disable(char *str)