fuse: skip blocking on allocations of synchronous requests
A task may have at most one synchronous request allocated.  So these
requests need not be otherwise limited.

The patch re-works fuse_get_req() to follow this idea.

Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
parent 796523fb24
commit 0aada88476
3 changed files with 21 additions and 13 deletions
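To illustrate the policy this patch adopts before reading the diff, here is a small standalone userspace sketch. It is not part of the patch: the conn_model struct and block_alloc() helper are invented for the example and only model the two fuse_conn flags. The point it shows is the condition the new fuse_block_alloc() helper checks: every allocation waits until the connection is initialized, but only background allocations additionally wait while fc->blocked is set.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy model of the two fuse_conn flags that gate request allocation. */
struct conn_model {
        bool initialized;       /* set once the INIT reply has arrived */
        bool blocked;           /* set while too many background requests are queued */
};

/* Mirrors the shape of the new fuse_block_alloc(): block until initialized,
 * and additionally block background requests while the connection is blocked. */
static bool block_alloc(const struct conn_model *c, bool for_background)
{
        return !c->initialized || (for_background && c->blocked);
}

int main(void)
{
        struct conn_model states[] = {
                { .initialized = false, .blocked = false },
                { .initialized = true,  .blocked = true  },
                { .initialized = true,  .blocked = false },
        };

        for (size_t i = 0; i < sizeof(states) / sizeof(states[0]); i++) {
                printf("init=%d blocked=%d -> sync %s, background %s\n",
                       states[i].initialized, states[i].blocked,
                       block_alloc(&states[i], false) ? "waits" : "proceeds",
                       block_alloc(&states[i], true)  ? "waits" : "proceeds");
        }
        return 0;
}

Compiled with any C99 compiler, it prints that a synchronous allocation proceeds as soon as the connection is initialized, even while background allocations are still held back by the blocked flag.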
fs/fuse/cuse.c
@@ -504,7 +504,6 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
         cc->fc.release = cuse_fc_release;
 
         cc->fc.connected = 1;
-        cc->fc.blocked = 0;
         cc->fc.initialized = 1;
         rc = cuse_send_init(cc);
         if (rc) {
fs/fuse/dev.c
@@ -130,21 +130,30 @@ static void fuse_req_init_context(struct fuse_req *req)
         req->in.h.pid = current->pid;
 }
 
+static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
+{
+        return !fc->initialized || (for_background && fc->blocked);
+}
+
 static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
                                        bool for_background)
 {
         struct fuse_req *req;
-        sigset_t oldset;
-        int intr;
         int err;
-
         atomic_inc(&fc->num_waiting);
-        block_sigs(&oldset);
-        intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
-        restore_sigs(&oldset);
-        err = -EINTR;
-        if (intr)
-                goto out;
+
+        if (fuse_block_alloc(fc, for_background)) {
+                sigset_t oldset;
+                int intr;
+
+                block_sigs(&oldset);
+                intr = wait_event_interruptible(fc->blocked_waitq,
+                                !fuse_block_alloc(fc, for_background));
+                restore_sigs(&oldset);
+                err = -EINTR;
+                if (intr)
+                        goto out;
+        }
 
         err = -ENOTCONN;
         if (!fc->connected)
@@ -239,7 +248,7 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
         struct fuse_req *req;
 
         atomic_inc(&fc->num_waiting);
-        wait_event(fc->blocked_waitq, !fc->blocked);
+        wait_event(fc->blocked_waitq, fc->initialized);
         req = fuse_request_alloc(0);
         if (!req)
                 req = get_reserved_req(fc, file);
@@ -2106,6 +2115,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
                 spin_lock(&fc->lock);
                 fc->connected = 0;
                 fc->blocked = 0;
+                fc->initialized = 1;
                 end_queued_requests(fc);
                 end_polls(fc);
                 wake_up_all(&fc->blocked_waitq);
fs/fuse/inode.c
@@ -583,7 +583,7 @@ void fuse_conn_init(struct fuse_conn *fc)
         fc->khctr = 0;
         fc->polled_files = RB_ROOT;
         fc->reqctr = 0;
-        fc->blocked = 1;
+        fc->blocked = 0;
         fc->initialized = 0;
         fc->attr_version = 1;
         get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
@@ -883,7 +883,6 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                 fc->max_write = max_t(unsigned, 4096, fc->max_write);
                 fc->conn_init = 1;
         }
-        fc->blocked = 0;
         fc->initialized = 1;
         wake_up_all(&fc->blocked_waitq);
 }