LOCKD: Don't call utsname()->nodename from nlmclnt_setlockargs
Firstly, nlmclnt_setlockargs can be called from a reclaimer thread, in
which case we're in entirely the wrong namespace.

Secondly, commit 8aac62706a ("move exit_task_namespaces() outside of
exit_notify()") now means that exit_task_work() is called after
exit_task_namespaces(), which triggers an Oops when we're freeing up
the locks.

Fix this by ensuring that we initialise the nlm_host's rpc_client at
mount time, so that the cl_nodename field is initialised to the value
of utsname()->nodename that the net namespace uses. Then replace the
lockd callers of utsname()->nodename.
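
As an illustration of the pattern the fix depends on, here is a minimal
userspace sketch (struct client_state, client_init() and
set_lock_owner() are invented names for this example, not lockd
symbols): snapshot utsname()->nodename once at setup time, and let the
request path read only the cached copy.

#include <stdio.h>
#include <string.h>
#include <sys/utsname.h>

/* Illustrative stand-in for per-host state; lockd instead caches the
 * name in the rpc_client's cl_nodename field. */
struct client_state {
	char nodename[65];
};

/* Runs once at "mount" time, while we are still in the right
 * namespace and the task's uname state is guaranteed to be alive. */
static int client_init(struct client_state *c)
{
	struct utsname u;

	if (uname(&u) != 0)
		return -1;
	strncpy(c->nodename, u.nodename, sizeof(c->nodename) - 1);
	c->nodename[sizeof(c->nodename) - 1] = '\0';
	return 0;
}

/* Request-time path: reads only the cached copy, so it no longer
 * matters which thread (or how late in exit) this runs. */
static void set_lock_owner(const struct client_state *c, char *buf,
			   size_t len, unsigned int pid)
{
	snprintf(buf, len, "%u@%s", pid, c->nodename);
}

int main(void)
{
	struct client_state c;
	char owner[128];

	if (client_init(&c) != 0)
		return 1;
	set_lock_owner(&c, owner, sizeof(owner), 1234);
	puts(owner);
	return 0;
}

In the patch below, the cached copy is the rpc_client's cl_nodename
field; nlmclnt_init() now ensures the client is bound at mount time, so
the field is guaranteed to exist by the time nlmclnt_setlockargs()
runs.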
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: Toralf Förster <toralf.foerster@gmx.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Nix <nix@esperi.org.uk>
Cc: Jeff Layton <jlayton@redhat.com>
Cc: stable@vger.kernel.org # 3.10.x
	
	
parent c095ba7224
commit 9a1b6bf818

2 changed files with 12 additions and 6 deletions
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -64,12 +64,17 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
 				   nlm_init->protocol, nlm_version,
 				   nlm_init->hostname, nlm_init->noresvport,
 				   nlm_init->net);
-	if (host == NULL) {
-		lockd_down(nlm_init->net);
-		return ERR_PTR(-ENOLCK);
-	}
+	if (host == NULL)
+		goto out_nohost;
+	if (host->h_rpcclnt == NULL && nlm_bind_host(host) == NULL)
+		goto out_nobind;
 
 	return host;
+out_nobind:
+	nlmclnt_release_host(host);
+out_nohost:
+	lockd_down(nlm_init->net);
+	return ERR_PTR(-ENOLCK);
 }
 EXPORT_SYMBOL_GPL(nlmclnt_init);
 
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -125,14 +125,15 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
 {
 	struct nlm_args	*argp = &req->a_args;
 	struct nlm_lock	*lock = &argp->lock;
+	char *nodename = req->a_host->h_rpcclnt->cl_nodename;
 
 	nlmclnt_next_cookie(&argp->cookie);
 	memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh));
-	lock->caller  = utsname()->nodename;
+	lock->caller  = nodename;
 	lock->oh.data = req->a_owner;
 	lock->oh.len  = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
 				(unsigned int)fl->fl_u.nfs_fl.owner->pid,
-				utsname()->nodename);
+				nodename);
 	lock->svid = fl->fl_u.nfs_fl.owner->pid;
 	lock->fl.fl_start = fl->fl_start;
 	lock->fl.fl_end = fl->fl_end;