fuse: make request allocations for background processing explicit
There are two types of request processing in FUSE: synchronous (via fuse_request_send()) and asynchronous (by adding the request to fc->bg_queue). Fortunately, the type of processing is always known in advance, at the time of request allocation. This preparatory patch uses that fact to make fuse_get_req() aware of the type; the following patches will build on it.

Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
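For reference, a minimal illustrative sketch (not part of the patch; the helper names example_send_sync() and example_send_background() are hypothetical) of how callers are expected to pair allocation with submission after this series: plain fuse_get_req() for requests answered synchronously via fuse_request_send(), and fuse_get_req_for_background() for requests queued on fc->bg_queue via fuse_request_send_background().

	/* Illustrative only -- not part of this patch. */
	static int example_send_sync(struct fuse_conn *fc)
	{
		struct fuse_req *req = fuse_get_req(fc, 1);	/* foreground allocation */
		int err;

		if (IS_ERR(req))
			return PTR_ERR(req);
		/* ... fill in req->in.h.opcode, arguments, pages ... */
		fuse_request_send(fc, req);	/* blocks until the reply arrives */
		err = req->out.h.error;
		fuse_put_request(fc, req);
		return err;
	}

	static int example_send_background(struct fuse_conn *fc)
	{
		struct fuse_req *req = fuse_get_req_for_background(fc, 1);	/* req->background set at allocation */

		if (IS_ERR(req))
			return PTR_ERR(req);
		/* ... fill in the request, optionally set req->end for completion ... */
		fuse_request_send_background(fc, req);	/* queued on fc->bg_queue; takes over the reference */
		return 0;
	}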
parent 4c82456eeb
commit 8b41e6715e

5 changed files with 37 additions and 5 deletions
fs/fuse/cuse.c
@@ -422,7 +422,7 @@ static int cuse_send_init(struct cuse_conn *cc)
 
 	BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE);
 
-	req = fuse_get_req(fc, 1);
+	req = fuse_get_req_for_background(fc, 1);
 	if (IS_ERR(req)) {
 		rc = PTR_ERR(req);
 		goto err;
fs/fuse/dev.c
@@ -130,7 +130,8 @@ static void fuse_req_init_context(struct fuse_req *req)
 	req->in.h.pid = current->pid;
 }
 
-struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
+static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
+				       bool for_background)
 {
 	struct fuse_req *req;
 	sigset_t oldset;
@@ -156,14 +157,27 @@ struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
 
 	fuse_req_init_context(req);
 	req->waiting = 1;
+	req->background = for_background;
 	return req;
 
  out:
 	atomic_dec(&fc->num_waiting);
 	return ERR_PTR(err);
 }
+
+struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
+{
+	return __fuse_get_req(fc, npages, false);
+}
 EXPORT_SYMBOL_GPL(fuse_get_req);
 
+struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
+					     unsigned npages)
+{
+	return __fuse_get_req(fc, npages, true);
+}
+EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
+
 /*
  * Return request in fuse_file->reserved_req.  However that may
  * currently be in use.  If that is the case, wait for it to become
@@ -232,6 +246,7 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
 
 	fuse_req_init_context(req);
 	req->waiting = 1;
+	req->background = 0;
 	return req;
 }
 
@@ -442,6 +457,7 @@ __acquires(fc->lock)
 
 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 {
+	BUG_ON(req->background);
 	spin_lock(&fc->lock);
 	if (!fc->connected)
 		req->out.h.error = -ENOTCONN;
@@ -469,7 +485,7 @@ EXPORT_SYMBOL_GPL(fuse_request_send);
 static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
 					    struct fuse_req *req)
 {
-	req->background = 1;
+	BUG_ON(!req->background);
 	fc->num_background++;
 	if (fc->num_background == fc->max_background)
 		fc->blocked = 1;
fs/fuse/file.c
@@ -126,11 +126,13 @@ static void fuse_file_put(struct fuse_file *ff, bool sync)
 		struct fuse_req *req = ff->reserved_req;
 
 		if (sync) {
+			req->background = 0;
 			fuse_request_send(ff->fc, req);
 			path_put(&req->misc.release.path);
 			fuse_put_request(ff->fc, req);
 		} else {
 			req->end = fuse_release_end;
+			req->background = 1;
 			fuse_request_send_background(ff->fc, req);
 		}
 		kfree(ff);
@@ -282,6 +284,7 @@ void fuse_sync_release(struct fuse_file *ff, int flags)
 	WARN_ON(atomic_read(&ff->count) > 1);
 	fuse_prepare_release(ff, flags, FUSE_RELEASE);
 	ff->reserved_req->force = 1;
+	ff->reserved_req->background = 0;
 	fuse_request_send(ff->fc, ff->reserved_req);
 	fuse_put_request(ff->fc, ff->reserved_req);
 	kfree(ff);
@@ -661,7 +664,12 @@ static int fuse_readpages_fill(void *_data, struct page *page)
 		int nr_alloc = min_t(unsigned, data->nr_pages,
 				     FUSE_MAX_PAGES_PER_REQ);
 		fuse_send_readpages(req, data->file);
-		data->req = req = fuse_get_req(fc, nr_alloc);
+		if (fc->async_read)
+			req = fuse_get_req_for_background(fc, nr_alloc);
+		else
+			req = fuse_get_req(fc, nr_alloc);
+
+		data->req = req;
 		if (IS_ERR(req)) {
 			unlock_page(page);
 			return PTR_ERR(req);
@@ -696,6 +704,9 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
 
 	data.file = file;
 	data.inode = inode;
-	data.req = fuse_get_req(fc, nr_alloc);
+	if (fc->async_read)
+		data.req = fuse_get_req_for_background(fc, nr_alloc);
+	else
+		data.req = fuse_get_req(fc, nr_alloc);
 	data.nr_pages = nr_pages;
 	err = PTR_ERR(data.req);
@@ -1375,6 +1386,7 @@ static int fuse_writepage_locked(struct page *page)
 	if (!req)
 		goto err;
 
+	req->background = 1; /* writeback always goes to bg_queue */
 	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
 	if (!tmp_page)
 		goto err_free;
fs/fuse/fuse_i.h
@@ -708,6 +708,8 @@ void fuse_request_free(struct fuse_req *req);
  * caller should specify # elements in req->pages[] explicitly
  */
 struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages);
+struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
+					     unsigned npages);
 
 /**
  * Get a request, may fail with -ENOMEM,
fs/fuse/inode.c
@@ -346,6 +346,7 @@ static void fuse_send_destroy(struct fuse_conn *fc)
 		fc->destroy_req = NULL;
 		req->in.h.opcode = FUSE_DESTROY;
 		req->force = 1;
+		req->background = 0;
 		fuse_request_send(fc, req);
 		fuse_put_request(fc, req);
 	}
@@ -1043,6 +1044,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
 	init_req = fuse_request_alloc(0);
 	if (!init_req)
 		goto err_put_root;
+	init_req->background = 1;
 
 	if (is_bdev) {
 		fc->destroy_req = fuse_request_alloc(0);