commit 75659ca0c1
Merge branch 'task_killable' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc

* 'task_killable' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc: (22 commits)
  Remove commented-out code copied from NFS
  NFS: Switch from intr mount option to TASK_KILLABLE
  Add wait_for_completion_killable
  Add wait_event_killable
  Add schedule_timeout_killable
  Use mutex_lock_killable in vfs_readdir
  Add mutex_lock_killable
  Use lock_page_killable
  Add lock_page_killable
  Add fatal_signal_pending
  Add TASK_WAKEKILL
  exit: Use task_is_*
  signal: Use task_is_*
  sched: Use task_contributes_to_load, TASK_ALL and TASK_NORMAL
  ptrace: Use task_is_*
  power: Use task_is_*
  wait: Use TASK_NORMAL
  proc/base.c: Use task_is_*
  proc/array.c: Use TASK_REPORT
  perfmon: Use task_is_*
  ...

Fixed up conflicts in NFS/sunrpc manually.

38 changed files with 282 additions and 252 deletions
@@ -2631,7 +2631,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	 */
 	if (task == current) return 0;

-	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
+	if (!task_is_stopped_or_traced(task)) {
 		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
 		return -EBUSY;
 	}
@@ -4792,7 +4792,7 @@ recheck:
 	 * the task must be stopped.
 	 */
 	if (PFM_CMD_STOPPED(cmd)) {
-		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
+		if (!task_is_stopped_or_traced(task)) {
 			DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
 			return -EBUSY;
 		}
@@ -656,8 +656,7 @@ is_linked:
 	 * wait list.
 	 */
 	if (waitqueue_active(&ep->wq))
-		__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-				 TASK_INTERRUPTIBLE);
+		wake_up_locked(&ep->wq);
 	if (waitqueue_active(&ep->poll_wait))
 		pwake++;

@@ -780,7 +779,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,

 		/* Notify waiting tasks that events are available */
 		if (waitqueue_active(&ep->wq))
-			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE);
+			wake_up_locked(&ep->wq);
 		if (waitqueue_active(&ep->poll_wait))
 			pwake++;
 	}
@@ -854,8 +853,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even

 		/* Notify waiting tasks that events are available */
 		if (waitqueue_active(&ep->wq))
-			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-					 TASK_INTERRUPTIBLE);
+			wake_up_locked(&ep->wq);
 		if (waitqueue_active(&ep->poll_wait))
 			pwake++;
 	}
@@ -978,8 +976,7 @@ errxit:
 		 * wait list (delayed after we release the lock).
 		 */
 		if (waitqueue_active(&ep->wq))
-			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-					 TASK_INTERRUPTIBLE);
+			wake_up_locked(&ep->wq);
 		if (waitqueue_active(&ep->poll_wait))
 			pwake++;
 	}
@@ -386,7 +386,7 @@ found_client:
 	if (new)
 		nfs_free_client(new);

-	error = wait_event_interruptible(nfs_client_active_wq,
+	error = wait_event_killable(nfs_client_active_wq,
 			clp->cl_cons_state != NFS_CS_INITING);
 	if (error < 0) {
 		nfs_put_client(clp);
@@ -589,10 +589,6 @@ static int nfs_init_server_rpcclient(struct nfs_server *server,
 	if (server->flags & NFS_MOUNT_SOFT)
 		server->client->cl_softrtry = 1;

-	server->client->cl_intr = 0;
-	if (server->flags & NFS4_MOUNT_INTR)
-		server->client->cl_intr = 1;
-
 	return 0;
 }

@@ -188,17 +188,12 @@ static void nfs_direct_req_release(struct nfs_direct_req *dreq)
 static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
 {
 	ssize_t result = -EIOCBQUEUED;
-	struct rpc_clnt *clnt;
-	sigset_t oldset;

 	/* Async requests don't wait here */
 	if (dreq->iocb)
 		goto out;

-	clnt = NFS_CLIENT(dreq->inode);
-	rpc_clnt_sigmask(clnt, &oldset);
-	result = wait_for_completion_interruptible(&dreq->completion);
-	rpc_clnt_sigunmask(clnt, &oldset);
+	result = wait_for_completion_killable(&dreq->completion);

 	if (!result)
 		result = dreq->error;
@@ -433,15 +433,11 @@ static int nfs_wait_schedule(void *word)
  */
 static int nfs_wait_on_inode(struct inode *inode)
 {
-	struct rpc_clnt *clnt = NFS_CLIENT(inode);
 	struct nfs_inode *nfsi = NFS_I(inode);
-	sigset_t oldmask;
 	int error;

-	rpc_clnt_sigmask(clnt, &oldmask);
 	error = wait_on_bit_lock(&nfsi->flags, NFS_INO_REVALIDATING,
-				 nfs_wait_schedule, TASK_INTERRUPTIBLE);
-	rpc_clnt_sigunmask(clnt, &oldmask);
+				 nfs_wait_schedule, TASK_KILLABLE);

 	return error;
 }
@@ -56,7 +56,7 @@ int nfs_mount(struct sockaddr *addr, size_t len, char *hostname, char *path,
 		.program = &mnt_program,
 		.version = version,
 		.authflavor = RPC_AUTH_UNIX,
-		.flags = RPC_CLNT_CREATE_INTR,
+		.flags = 0,
 	};
 	struct rpc_clnt *mnt_clnt;
 	int status;
@@ -27,17 +27,14 @@
 static int
 nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 {
-	sigset_t oldset;
 	int res;
-	rpc_clnt_sigmask(clnt, &oldset);
 	do {
 		res = rpc_call_sync(clnt, msg, flags);
 		if (res != -EJUKEBOX)
 			break;
-		schedule_timeout_interruptible(NFS_JUKEBOX_RETRY_TIME);
+		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
 		res = -ERESTARTSYS;
-	} while (!signalled());
-	rpc_clnt_sigunmask(clnt, &oldset);
+	} while (!fatal_signal_pending(current));
 	return res;
 }

@@ -316,12 +316,9 @@ static void nfs4_opendata_put(struct nfs4_opendata *p)

 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
 {
-	sigset_t oldset;
 	int ret;

-	rpc_clnt_sigmask(task->tk_client, &oldset);
 	ret = rpc_wait_for_completion_task(task);
-	rpc_clnt_sigunmask(task->tk_client, &oldset);
 	return ret;
 }

@@ -2785,9 +2782,9 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
 	return 0;
 }

-static int nfs4_wait_bit_interruptible(void *word)
+static int nfs4_wait_bit_killable(void *word)
 {
-	if (signal_pending(current))
+	if (fatal_signal_pending(current))
 		return -ERESTARTSYS;
 	schedule();
 	return 0;
@@ -2795,18 +2792,14 @@ static int nfs4_wait_bit_interruptible(void *word)

 static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp)
 {
-	sigset_t oldset;
 	int res;

 	might_sleep();

 	rwsem_acquire(&clp->cl_sem.dep_map, 0, 0, _RET_IP_);

-	rpc_clnt_sigmask(clnt, &oldset);
 	res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER,
-			nfs4_wait_bit_interruptible,
-			TASK_INTERRUPTIBLE);
-	rpc_clnt_sigunmask(clnt, &oldset);
+			nfs4_wait_bit_killable, TASK_KILLABLE);

 	rwsem_release(&clp->cl_sem.dep_map, 1, _RET_IP_);
 	return res;
@@ -2814,7 +2807,6 @@ static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp)

 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
 {
-	sigset_t oldset;
 	int res = 0;

 	might_sleep();
@@ -2823,14 +2815,9 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
 		*timeout = NFS4_POLL_RETRY_MIN;
 	if (*timeout > NFS4_POLL_RETRY_MAX)
 		*timeout = NFS4_POLL_RETRY_MAX;
-	rpc_clnt_sigmask(clnt, &oldset);
-	if (clnt->cl_intr) {
-		schedule_timeout_interruptible(*timeout);
-		if (signalled())
-			res = -ERESTARTSYS;
-	} else
-		schedule_timeout_uninterruptible(*timeout);
-	rpc_clnt_sigunmask(clnt, &oldset);
+	schedule_timeout_killable(*timeout);
+	if (fatal_signal_pending(current))
+		res = -ERESTARTSYS;
 	*timeout <<= 1;
 	return res;
 }
@@ -3069,7 +3056,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
 static unsigned long
 nfs4_set_lock_task_retry(unsigned long timeout)
 {
-	schedule_timeout_interruptible(timeout);
+	schedule_timeout_killable(timeout);
 	timeout <<= 1;
 	if (timeout > NFS4_LOCK_MAXTIMEOUT)
 		return NFS4_LOCK_MAXTIMEOUT;
@@ -228,10 +228,7 @@ static int __init root_nfs_parse(char *name, char *buf)
 			nfs_data.flags &= ~NFS_MOUNT_SOFT;
 			break;
 		case Opt_intr:
-			nfs_data.flags |= NFS_MOUNT_INTR;
-			break;
 		case Opt_nointr:
-			nfs_data.flags &= ~NFS_MOUNT_INTR;
 			break;
 		case Opt_posix:
 			nfs_data.flags |= NFS_MOUNT_POSIX;
@@ -58,7 +58,6 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
 		    struct page *page,
 		    unsigned int offset, unsigned int count)
 {
-	struct nfs_server *server = NFS_SERVER(inode);
 	struct nfs_page *req;

 	for (;;) {
@@ -67,7 +66,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
 		if (req != NULL)
 			break;

-		if (signalled() && (server->flags & NFS_MOUNT_INTR))
+		if (fatal_signal_pending(current))
 			return ERR_PTR(-ERESTARTSYS);
 		yield();
 	}
@@ -177,11 +176,11 @@ void nfs_release_request(struct nfs_page *req)
 	kref_put(&req->wb_kref, nfs_free_request);
 }

-static int nfs_wait_bit_interruptible(void *word)
+static int nfs_wait_bit_killable(void *word)
 {
 	int ret = 0;

-	if (signal_pending(current))
+	if (fatal_signal_pending(current))
 		ret = -ERESTARTSYS;
 	else
 		schedule();
@@ -192,26 +191,18 @@ static int nfs_wait_bit_interruptible(void *word)
  * nfs_wait_on_request - Wait for a request to complete.
  * @req: request to wait upon.
  *
- * Interruptible by signals only if mounted with intr flag.
+ * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
 int
 nfs_wait_on_request(struct nfs_page *req)
 {
-	struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->path.dentry->d_inode);
-	sigset_t oldmask;
 	int ret = 0;

 	if (!test_bit(PG_BUSY, &req->wb_flags))
 		goto out;
-	/*
-	 * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
-	 *	 are not interrupted if intr flag is not set
-	 */
-	rpc_clnt_sigmask(clnt, &oldmask);
 	ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
-			nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
-	rpc_clnt_sigunmask(clnt, &oldmask);
+			nfs_wait_bit_killable, TASK_KILLABLE);
 out:
 	return ret;
 }
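
For orientation, the nfs_wait_bit_killable()/TASK_KILLABLE pairing above is the generic pattern for any wait_on_bit() caller that wants to honour SIGKILL. A minimal sketch of the same idea follows (not part of the diff); the MYDEV_BUSY bit and mydev_* names are hypothetical, only wait_on_bit(), schedule() and fatal_signal_pending() come from the kernel APIs touched by this merge:

#include <linux/wait.h>
#include <linux/sched.h>

#define MYDEV_BUSY	0	/* hypothetical flag bit in a ->flags word */

/* Wait-bit action: sleep killably instead of uninterruptibly. */
static int mydev_wait_bit_killable(void *word)
{
	if (fatal_signal_pending(current))
		return -ERESTARTSYS;	/* abort the wait on SIGKILL */
	schedule();
	return 0;
}

/* Returns 0 once the bit clears, -ERESTARTSYS if the sleeping task was killed. */
static int mydev_wait_not_busy(unsigned long *flags)
{
	return wait_on_bit(flags, MYDEV_BUSY,
			   mydev_wait_bit_killable, TASK_KILLABLE);
}
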
@@ -448,7 +448,6 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
 		const char *nostr;
 	} nfs_info[] = {
 		{ NFS_MOUNT_SOFT, ",soft", ",hard" },
-		{ NFS_MOUNT_INTR, ",intr", ",nointr" },
 		{ NFS_MOUNT_NOCTO, ",nocto", "" },
 		{ NFS_MOUNT_NOAC, ",noac", "" },
 		{ NFS_MOUNT_NONLM, ",nolock", "" },
@@ -708,10 +707,7 @@ static int nfs_parse_mount_options(char *raw,
 			mnt->flags &= ~NFS_MOUNT_SOFT;
 			break;
 		case Opt_intr:
-			mnt->flags |= NFS_MOUNT_INTR;
-			break;
 		case Opt_nointr:
-			mnt->flags &= ~NFS_MOUNT_INTR;
 			break;
 		case Opt_posix:
 			mnt->flags |= NFS_MOUNT_POSIX;
@@ -488,7 +488,7 @@ int nfs_reschedule_unstable_write(struct nfs_page *req)
 /*
  * Wait for a request to complete.
  *
- * Interruptible by signals only if mounted with intr flag.
+ * Interruptible by fatal signals only.
 */
 static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
 {
@@ -141,12 +141,7 @@ static const char *task_state_array[] = {

 static inline const char *get_task_state(struct task_struct *tsk)
 {
-	unsigned int state = (tsk->state & (TASK_RUNNING |
-					    TASK_INTERRUPTIBLE |
-					    TASK_UNINTERRUPTIBLE |
-					    TASK_STOPPED |
-					    TASK_TRACED)) |
-					   tsk->exit_state;
+	unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state;
 	const char **p = &task_state_array[0];

 	while (state) {
@@ -199,7 +199,7 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf
 	(task == current || \
 	(task->parent == current && \
 	(task->ptrace & PT_PTRACED) && \
-	(task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
+	(task_is_stopped_or_traced(task)) && \
 	security_ptrace(current,task) == 0))

 struct mm_struct *mm_for_maps(struct task_struct *task)
@@ -30,7 +30,10 @@ int vfs_readdir(struct file *file, filldir_t filler, void *buf)
 	if (res)
 		goto out;

-	mutex_lock(&inode->i_mutex);
+	res = mutex_lock_killable(&inode->i_mutex);
+	if (res)
+		goto out;
+
 	res = -ENOENT;
 	if (!IS_DEADDIR(inode)) {
 		res = file->f_op->readdir(file, buf, filler);
@@ -105,7 +105,7 @@ struct smb_request *smb_alloc_request(struct smb_sb_info *server, int bufsize)
 		if (nfs_try_to_free_pages(server))
 			continue;

-		if (signalled() && (server->flags & NFS_MOUNT_INTR))
+		if (fatal_signal_pending(current))
 			return ERR_PTR(-ERESTARTSYS);
 		current->policy = SCHED_YIELD;
 		schedule();
@@ -44,6 +44,7 @@ static inline void init_completion(struct completion *x)

 extern void wait_for_completion(struct completion *);
 extern int wait_for_completion_interruptible(struct completion *x);
+extern int wait_for_completion_killable(struct completion *x);
 extern unsigned long wait_for_completion_timeout(struct completion *x,
 						unsigned long timeout);
 extern unsigned long wait_for_completion_interruptible_timeout(
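
A rough illustration (not part of the diff) of how a caller moves from wait_for_completion_interruptible() to the new killable variant; the completion and helper names are made up:

#include <linux/completion.h>

static DECLARE_COMPLETION(mydev_done);	/* hypothetical, signalled by a worker */

static int mydev_wait_done(void)
{
	/* Sleeps in TASK_KILLABLE: only a fatal signal ends the wait early. */
	int err = wait_for_completion_killable(&mydev_done);

	if (err)
		return err;	/* -ERESTARTSYS: caller was killed */
	return 0;		/* completion arrived */
}
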
@@ -125,15 +125,20 @@ static inline int fastcall mutex_is_locked(struct mutex *lock)
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
 					unsigned int subclass);
+extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
+					unsigned int subclass);

 #define mutex_lock(lock) mutex_lock_nested(lock, 0)
 #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
+#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
 #else
 extern void fastcall mutex_lock(struct mutex *lock);
 extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
+extern int __must_check fastcall mutex_lock_killable(struct mutex *lock);

 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
+# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
 #endif

 /*
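
Sketch of intended use (illustrative, not from this diff): a syscall path takes a mutex killably so a task stuck behind the lock can still be SIGKILLed; struct mydev and its field are hypothetical. This is the pattern vfs_readdir() adopts earlier in this merge.

#include <linux/mutex.h>

struct mydev {			/* hypothetical device */
	struct mutex lock;
	int value;
};

static int mydev_read_value(struct mydev *dev, int *out)
{
	/* Non-zero (-EINTR) if a fatal signal arrives while queued on the lock. */
	if (mutex_lock_killable(&dev->lock))
		return -EINTR;

	*out = dev->value;
	mutex_unlock(&dev->lock);
	return 0;
}
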
@@ -556,14 +556,7 @@ extern void * nfs_root_data(void);

 #define nfs_wait_event(clnt, wq, condition) \
 ({ \
-	int __retval = 0; \
-	if (clnt->cl_intr) { \
-		sigset_t oldmask; \
-		rpc_clnt_sigmask(clnt, &oldmask); \
-		__retval = wait_event_interruptible(wq, condition); \
-		rpc_clnt_sigunmask(clnt, &oldmask); \
-	} else \
-		wait_event(wq, condition); \
+	int __retval = wait_event_killable(wq, condition); \
 	__retval; \
 })

@@ -48,7 +48,7 @@ struct nfs_mount_data {
 /* bits in the flags field */

 #define NFS_MOUNT_SOFT 0x0001 /* 1 */
-#define NFS_MOUNT_INTR 0x0002 /* 1 */
+#define NFS_MOUNT_INTR 0x0002 /* 1 */ /* now unused, but ABI */
 #define NFS_MOUNT_SECURE 0x0004 /* 1 */
 #define NFS_MOUNT_POSIX 0x0008 /* 1 */
 #define NFS_MOUNT_NOCTO 0x0010 /* 1 */
@@ -157,6 +157,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 }

 extern void FASTCALL(__lock_page(struct page *page));
+extern int FASTCALL(__lock_page_killable(struct page *page));
 extern void FASTCALL(__lock_page_nosync(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));

@@ -170,6 +171,19 @@ static inline void lock_page(struct page *page)
 		__lock_page(page);
 }

+/*
+ * lock_page_killable is like lock_page but can be interrupted by fatal
+ * signals.  It returns 0 if it locked the page and -EINTR if it was
+ * killed while waiting.
+ */
+static inline int lock_page_killable(struct page *page)
+{
+	might_sleep();
+	if (TestSetPageLocked(page))
+		return __lock_page_killable(page);
+	return 0;
+}
+
 /*
  * lock_page_nosync should only be used if we can't pin the page's inode.
  * Doesn't play quite so well with block device plugging.
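
For context only, a hedged sketch of a lock_page_killable() caller; it mirrors the mm/filemap.c conversion further down in this diff, but the helper shown here is invented:

#include <linux/pagemap.h>

/* Lock a page, but give up if the caller is killed while waiting. */
static int mydev_with_locked_page(struct page *page)
{
	if (lock_page_killable(page))
		return -EINTR;	/* fatal signal while waiting for PG_locked */

	/* ... page is locked here; do the work ... */
	unlock_page(page);
	return 0;
}
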
@@ -172,13 +172,35 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define TASK_RUNNING 0
 #define TASK_INTERRUPTIBLE 1
 #define TASK_UNINTERRUPTIBLE 2
-#define TASK_STOPPED 4
-#define TASK_TRACED 8
+#define __TASK_STOPPED 4
+#define __TASK_TRACED 8
 /* in tsk->exit_state */
 #define EXIT_ZOMBIE 16
 #define EXIT_DEAD 32
 /* in tsk->state again */
 #define TASK_DEAD 64
+#define TASK_WAKEKILL 128
+
+/* Convenience macros for the sake of set_task_state */
+#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
+#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
+#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
+
+/* Convenience macros for the sake of wake_up */
+#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
+#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
+
+/* get_task_state() */
+#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
+		     TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
+		     __TASK_TRACED)
+
+#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
+#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
+#define task_is_stopped_or_traced(task) \
+	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+#define task_contributes_to_load(task) \
+	((task->state & TASK_UNINTERRUPTIBLE) != 0)
+
 #define __set_task_state(tsk, state_value) \
 	do { (tsk)->state = (state_value); } while (0)
@@ -302,6 +324,7 @@ extern int in_sched_functions(unsigned long addr);
 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
 extern signed long FASTCALL(schedule_timeout(signed long timeout));
 extern signed long schedule_timeout_interruptible(signed long timeout);
+extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);

@@ -1892,7 +1915,14 @@ static inline int signal_pending(struct task_struct *p)
 {
 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
 }

+extern int FASTCALL(__fatal_signal_pending(struct task_struct *p));
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+	return signal_pending(p) && __fatal_signal_pending(p);
+}
+
 static inline int need_resched(void)
 {
 	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
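
To make the new state concrete, here is a hand-rolled killable wait using TASK_KILLABLE and fatal_signal_pending() directly (illustrative only; the wait queue and flag are hypothetical). The wait_event_killable() macro added later in this merge packages exactly this loop:

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(mydev_wq);	/* hypothetical */
static int mydev_ready;

static int mydev_wait_ready(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		/* TASK_KILLABLE = TASK_WAKEKILL | TASK_UNINTERRUPTIBLE */
		prepare_to_wait(&mydev_wq, &wait, TASK_KILLABLE);
		if (mydev_ready)
			break;
		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	finish_wait(&mydev_wq, &wait);
	return ret;
}
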
@@ -41,7 +41,6 @@ struct rpc_clnt {
 	struct rpc_iostats * cl_metrics; /* per-client statistics */

 	unsigned int cl_softrtry : 1,/* soft timeouts */
-		     cl_intr     : 1,/* interruptible */
 		     cl_discrtry : 1,/* disconnect before retry */
 		     cl_autobind : 1;/* use getport() */

@@ -111,7 +110,6 @@ struct rpc_create_args {

 /* Values for "flags" field */
 #define RPC_CLNT_CREATE_HARDRTRY (1UL << 0)
-#define RPC_CLNT_CREATE_INTR (1UL << 1)
 #define RPC_CLNT_CREATE_AUTOBIND (1UL << 2)
 #define RPC_CLNT_CREATE_NONPRIVPORT (1UL << 3)
 #define RPC_CLNT_CREATE_NOPING (1UL << 4)
@@ -137,8 +135,6 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg,
 struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
 			       int flags);
 void rpc_restart_call(struct rpc_task *);
-void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset);
-void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset);
 void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
 size_t rpc_max_payload(struct rpc_clnt *);
 void rpc_force_rebind(struct rpc_clnt *);
@@ -137,7 +137,6 @@ struct rpc_task_setup {
 #define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
 #define RPC_TASK_KILLED 0x0100 /* task was killed */
 #define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
-#define RPC_TASK_NOINTR 0x0400 /* uninterruptible task */

 #define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
 #define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
@@ -145,7 +144,6 @@ struct rpc_task_setup {
 #define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED)
 #define RPC_DO_CALLBACK(t) ((t)->tk_callback != NULL)
 #define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT)
-#define RPC_TASK_UNINTERRUPTIBLE(t) ((t)->tk_flags & RPC_TASK_NOINTR)

 #define RPC_TASK_RUNNING 0
 #define RPC_TASK_QUEUED 1
@@ -152,14 +152,15 @@ int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
 int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
 wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));

-#define wake_up(x) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
-#define wake_up_nr(x, nr) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
-#define wake_up_all(x) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
+#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
+#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
+#define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
+#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL)
+
 #define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
 #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
-#define wake_up_locked(x) __wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
-#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

 #define __wait_event(wq, condition) \
 do { \
@@ -345,6 +346,47 @@ do { \
 	__ret; \
 })

+#define __wait_event_killable(wq, condition, ret) \
+do { \
+	DEFINE_WAIT(__wait); \
+ \
+	for (;;) { \
+		prepare_to_wait(&wq, &__wait, TASK_KILLABLE); \
+		if (condition) \
+			break; \
+		if (!fatal_signal_pending(current)) { \
+			schedule(); \
+			continue; \
+		} \
+		ret = -ERESTARTSYS; \
+		break; \
+	} \
+	finish_wait(&wq, &__wait); \
+} while (0)
+
+/**
+ * wait_event_killable - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_KILLABLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_killable(wq, condition) \
+({ \
+	int __ret = 0; \
+	if (!(condition)) \
+		__wait_event_killable(wq, condition, __ret); \
+	__ret; \
+})
+
 /*
  * Must be called with the spinlock in the wait_queue_head_t held.
  */
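
And the same wait written with the new macro, which is what the NFS client code above switches to (sketch only; the queue, flag and helpers are hypothetical):

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(mydev_init_wq);	/* hypothetical */
static int mydev_initialised;

static int mydev_wait_for_init(void)
{
	/* 0 when the condition became true, -ERESTARTSYS on SIGKILL. */
	return wait_event_killable(mydev_init_wq, mydev_initialised != 0);
}

/* The producer side still uses a plain wake_up(). */
static void mydev_mark_initialised(void)
{
	mydev_initialised = 1;
	wake_up(&mydev_init_wq);
}
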
@@ -249,7 +249,7 @@ static int has_stopped_jobs(struct pid *pgrp)
 	struct task_struct *p;

 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
-		if (p->state != TASK_STOPPED)
+		if (!task_is_stopped(p))
 			continue;
 		retval = 1;
 		break;
@@ -614,7 +614,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
 		p->parent = p->real_parent;
 		add_parent(p);

-		if (p->state == TASK_TRACED) {
+		if (task_is_traced(p)) {
 			/*
 			 * If it was at a trace stop, turn it into
 			 * a normal stop since it's no longer being
@@ -1563,60 +1563,51 @@ repeat:
 		}
 		allowed = 1;

-		switch (p->state) {
-		case TASK_TRACED:
-			/*
-			 * When we hit the race with PTRACE_ATTACH,
-			 * we will not report this child.  But the
-			 * race means it has not yet been moved to
-			 * our ptrace_children list, so we need to
-			 * set the flag here to avoid a spurious ECHILD
-			 * when the race happens with the only child.
-			 */
-			flag = 1;
-			if (!my_ptrace_child(p))
-				continue;
-			/*FALLTHROUGH*/
-		case TASK_STOPPED:
+		if (task_is_stopped_or_traced(p)) {
 			/*
 			 * It's stopped now, so it might later
 			 * continue, exit, or stop again.
+			 *
+			 * When we hit the race with PTRACE_ATTACH, we
+			 * will not report this child.  But the race
+			 * means it has not yet been moved to our
+			 * ptrace_children list, so we need to set the
+			 * flag here to avoid a spurious ECHILD when
+			 * the race happens with the only child.
 			 */
 			flag = 1;
-			if (!(options & WUNTRACED) &&
-			    !my_ptrace_child(p))
-				continue;
+
+			if (!my_ptrace_child(p)) {
+				if (task_is_traced(p))
+					continue;
+				if (!(options & WUNTRACED))
+					continue;
+			}
+
 			retval = wait_task_stopped(p, ret == 2,
-						   (options & WNOWAIT),
-						   infop,
-						   stat_addr, ru);
+					(options & WNOWAIT), infop,
+					stat_addr, ru);
 			if (retval == -EAGAIN)
 				goto repeat;
 			if (retval != 0) /* He released the lock.  */
 				goto end;
-			break;
-		default:
-		// case EXIT_DEAD:
-			if (p->exit_state == EXIT_DEAD)
+		} else if (p->exit_state == EXIT_DEAD) {
+			continue;
+		} else if (p->exit_state == EXIT_ZOMBIE) {
+			/*
+			 * Eligible but we cannot release it yet:
+			 */
+			if (ret == 2)
+				goto check_continued;
+			if (!likely(options & WEXITED))
 				continue;
-		// case EXIT_ZOMBIE:
-			if (p->exit_state == EXIT_ZOMBIE) {
-				/*
-				 * Eligible but we cannot release
-				 * it yet:
-				 */
-				if (ret == 2)
-					goto check_continued;
-				if (!likely(options & WEXITED))
-					continue;
-				retval = wait_task_zombie(
-					p, (options & WNOWAIT),
-					infop, stat_addr, ru);
-				/* He released the lock.  */
-				if (retval != 0)
-					goto end;
-				break;
-			}
+			retval = wait_task_zombie(p,
+					(options & WNOWAIT), infop,
+					stat_addr, ru);
+			/* He released the lock.  */
+			if (retval != 0)
+				goto end;
+		} else {
 check_continued:
 			/*
 			 * It's running now, so it might later
@@ -1625,12 +1616,11 @@ check_continued:
 			flag = 1;
 			if (!unlikely(options & WCONTINUED))
 				continue;
-			retval = wait_task_continued(
-				p, (options & WNOWAIT),
-				infop, stat_addr, ru);
+			retval = wait_task_continued(p,
+					(options & WNOWAIT), infop,
+					stat_addr, ru);
 			if (retval != 0) /* He released the lock.  */
 				goto end;
-			break;
 		}
 	}
 	if (!flag) {
@@ -166,9 +166,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * got a signal? (This code gets eliminated in the
 		 * TASK_UNINTERRUPTIBLE case.)
 		 */
-		if (unlikely(state == TASK_INTERRUPTIBLE &&
-			     signal_pending(task))) {
-			mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+		if (unlikely((state == TASK_INTERRUPTIBLE &&
+			      signal_pending(task)) ||
+			     (state == TASK_KILLABLE &&
+			      fatal_signal_pending(task)))) {
+			mutex_remove_waiter(lock, &waiter,
+					    task_thread_info(task));
 			mutex_release(&lock->dep_map, 1, ip);
 			spin_unlock_mutex(&lock->wait_lock, flags);

@@ -210,6 +213,14 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)

 EXPORT_SYMBOL_GPL(mutex_lock_nested);

+int __sched
+mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
+{
+	might_sleep();
+	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+
 int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
@@ -272,6 +283,9 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
 * mutex_lock_interruptible() and mutex_trylock().
 */
 static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count);
+
+static noinline int fastcall __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);

 /***
@@ -294,6 +308,14 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)

 EXPORT_SYMBOL(mutex_lock_interruptible);

+int fastcall __sched mutex_lock_killable(struct mutex *lock)
+{
+	might_sleep();
+	return __mutex_fastpath_lock_retval
+			(&lock->count, __mutex_lock_killable_slowpath);
+}
+EXPORT_SYMBOL(mutex_lock_killable);
+
 static void fastcall noinline __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
@@ -303,6 +325,14 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 }

 static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count)
+{
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
+}
+
+static noinline int fastcall __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -86,9 +86,9 @@ static void fake_signal_wake_up(struct task_struct *p, int resume)

 static void send_fake_signal(struct task_struct *p)
 {
-	if (p->state == TASK_STOPPED)
+	if (task_is_stopped(p))
 		force_sig_specific(SIGSTOP, p);
-	fake_signal_wake_up(p, p->state == TASK_STOPPED);
+	fake_signal_wake_up(p, task_is_stopped(p));
 }

 static int has_mm(struct task_struct *p)
@@ -182,7 +182,7 @@ static int try_to_freeze_tasks(int freeze_user_space)
 		if (frozen(p) || !freezeable(p))
 			continue;

-		if (p->state == TASK_TRACED && frozen(p->parent)) {
+		if (task_is_traced(p) && frozen(p->parent)) {
 			cancel_freezing(p);
 			continue;
 		}
@@ -51,7 +51,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 void ptrace_untrace(struct task_struct *child)
 {
 	spin_lock(&child->sighand->siglock);
-	if (child->state == TASK_TRACED) {
+	if (task_is_traced(child)) {
 		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
 			child->state = TASK_STOPPED;
 		} else {
@@ -79,7 +79,7 @@ void __ptrace_unlink(struct task_struct *child)
 		add_parent(child);
 	}

-	if (child->state == TASK_TRACED)
+	if (task_is_traced(child))
 		ptrace_untrace(child);
 }

@@ -103,9 +103,9 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	    && child->signal != NULL) {
 		ret = 0;
 		spin_lock_irq(&child->sighand->siglock);
-		if (child->state == TASK_STOPPED) {
+		if (task_is_stopped(child)) {
 			child->state = TASK_TRACED;
-		} else if (child->state != TASK_TRACED && !kill) {
+		} else if (!task_is_traced(child) && !kill) {
 			ret = -ESRCH;
 		}
 		spin_unlock_irq(&child->sighand->siglock);
@@ -1350,7 +1350,7 @@ static int effective_prio(struct task_struct *p)
 */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;

 	enqueue_task(rq, p, wakeup);
@@ -1362,7 +1362,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible++;

 	dequeue_task(rq, p, sleep);
@@ -1895,8 +1895,7 @@ out:

 int fastcall wake_up_process(struct task_struct *p)
 {
-	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
-				 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+	return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);

@@ -4124,8 +4123,7 @@ void complete(struct completion *x)

 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 1, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
@@ -4136,8 +4134,7 @@ void complete_all(struct completion *x)

 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 0, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
@@ -4151,8 +4148,10 @@ do_wait_for_common(struct completion *x, long timeout, int state)
 		wait.flags |= WQ_FLAG_EXCLUSIVE;
 		__add_wait_queue_tail(&x->wait, &wait);
 		do {
-			if (state == TASK_INTERRUPTIBLE &&
-			    signal_pending(current)) {
+			if ((state == TASK_INTERRUPTIBLE &&
+			     signal_pending(current)) ||
+			    (state == TASK_KILLABLE &&
+			     fatal_signal_pending(current))) {
 				__remove_wait_queue(&x->wait, &wait);
 				return -ERESTARTSYS;
 			}
@@ -4212,6 +4211,15 @@ wait_for_completion_interruptible_timeout(struct completion *x,
 }
 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);

+int __sched wait_for_completion_killable(struct completion *x)
+{
+	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
+	if (t == -ERESTARTSYS)
+		return t;
+	return 0;
+}
+EXPORT_SYMBOL(wait_for_completion_killable);
+
 static long __sched
 sleep_on_common(wait_queue_head_t *q, int state, long timeout)
 {
@@ -456,15 +456,15 @@ void signal_wake_up(struct task_struct *t, int resume)
 	set_tsk_thread_flag(t, TIF_SIGPENDING);

 	/*
-	 * For SIGKILL, we want to wake it up in the stopped/traced case.
-	 * We don't check t->state here because there is a race with it
+	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
+	 * case. We don't check t->state here because there is a race with it
 	 * executing another processor and just now entering stopped state.
 	 * By using wake_up_state, we ensure the process will wake up and
 	 * handle its death signal.
 	 */
 	mask = TASK_INTERRUPTIBLE;
 	if (resume)
-		mask |= TASK_STOPPED | TASK_TRACED;
+		mask |= TASK_WAKEKILL;
 	if (!wake_up_state(t, mask))
 		kick_process(t);
 }
@@ -620,7 +620,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 			 * Wake up the stopped thread _after_ setting
 			 * TIF_SIGPENDING
 			 */
-			state = TASK_STOPPED;
+			state = __TASK_STOPPED;
 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
 				set_tsk_thread_flag(t, TIF_SIGPENDING);
 				state |= TASK_INTERRUPTIBLE;
@@ -838,7 +838,7 @@ static inline int wants_signal(int sig, struct task_struct *p)
 		return 0;
 	if (sig == SIGKILL)
 		return 1;
-	if (p->state & (TASK_STOPPED | TASK_TRACED))
+	if (task_is_stopped_or_traced(p))
 		return 0;
 	return task_curr(p) || !signal_pending(p);
 }
@@ -994,6 +994,11 @@ void zap_other_threads(struct task_struct *p)
 	}
 }

+int fastcall __fatal_signal_pending(struct task_struct *tsk)
+{
+	return sigismember(&tsk->pending.signal, SIGKILL);
+}
+
 /*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
@@ -1441,7 +1446,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
 	BUG_ON(sig == -1);

 	/* do_notify_parent_cldstop should have been called instead. */
-	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
+	BUG_ON(task_is_stopped_or_traced(tsk));

 	BUG_ON(!tsk->ptrace &&
 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
@@ -1729,7 +1734,7 @@ static int do_signal_stop(int signr)
 			 * so this check has no races.
 			 */
 			if (!t->exit_state &&
-			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+			    !task_is_stopped_or_traced(t)) {
 				stop_count++;
 				signal_wake_up(t, 0);
 			}
@@ -1099,6 +1099,13 @@ signed long __sched schedule_timeout_interruptible(signed long timeout)
 }
 EXPORT_SYMBOL(schedule_timeout_interruptible);

+signed long __sched schedule_timeout_killable(signed long timeout)
+{
+	__set_current_state(TASK_KILLABLE);
+	return schedule_timeout(timeout);
+}
+EXPORT_SYMBOL(schedule_timeout_killable);
+
 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
 {
 	__set_current_state(TASK_UNINTERRUPTIBLE);
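
Typical use of schedule_timeout_killable() is a retry loop like the nfs3_rpc_wrapper() conversion earlier in this diff; a generic sketch with hypothetical names (struct mydev and mydev_try_op() are invented for illustration):

#include <linux/sched.h>
#include <linux/jiffies.h>

struct mydev;					/* hypothetical device */
extern int mydev_try_op(struct mydev *dev);	/* hypothetical; may return -EAGAIN */

static int mydev_retry_op(struct mydev *dev)
{
	int err;

	do {
		err = mydev_try_op(dev);
		if (err != -EAGAIN)
			break;
		/* Sleep ~100 ms in TASK_KILLABLE between attempts. */
		schedule_timeout_killable(HZ / 10);
		err = -ERESTARTSYS;
	} while (!fatal_signal_pending(current));

	return err;
}
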
@@ -215,7 +215,7 @@ void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
 {
 	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
 	if (waitqueue_active(wq))
-		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
+		__wake_up(wq, TASK_NORMAL, 1, &key);
 }
 EXPORT_SYMBOL(__wake_up_bit);

mm/filemap.c | 25
@ -185,6 +185,12 @@ static int sync_page(void *word)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int sync_page_killable(void *word)
|
||||||
|
{
|
||||||
|
sync_page(word);
|
||||||
|
return fatal_signal_pending(current) ? -EINTR : 0;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
|
* __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
|
||||||
* @mapping: address space structure to write
|
* @mapping: address space structure to write
|
||||||
|
@ -589,6 +595,14 @@ void fastcall __lock_page(struct page *page)
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(__lock_page);
|
EXPORT_SYMBOL(__lock_page);
|
||||||
|
|
||||||
|
int fastcall __lock_page_killable(struct page *page)
|
||||||
|
{
|
||||||
|
DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
|
||||||
|
|
||||||
|
return __wait_on_bit_lock(page_waitqueue(page), &wait,
|
||||||
|
sync_page_killable, TASK_KILLABLE);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Variant of lock_page that does not require the caller to hold a reference
|
* Variant of lock_page that does not require the caller to hold a reference
|
||||||
* on the page's mapping.
|
* on the page's mapping.
|
||||||
|
@@ -980,7 +994,8 @@ page_ok:
 
 page_not_up_to_date:
 		/* Get exclusive access to the page ... */
-		lock_page(page);
+		if (lock_page_killable(page))
+			goto readpage_eio;
 
 		/* Did it get truncated before we got the lock? */
 		if (!page->mapping) {
@@ -1008,7 +1023,8 @@ readpage:
 		}
 
 		if (!PageUptodate(page)) {
-			lock_page(page);
+			if (lock_page_killable(page))
+				goto readpage_eio;
 			if (!PageUptodate(page)) {
 				if (page->mapping == NULL) {
 					/*
@@ -1019,15 +1035,16 @@ readpage:
 					goto find_page;
 				}
 				unlock_page(page);
-				error = -EIO;
 				shrink_readahead_size_eio(filp, ra);
-				goto readpage_error;
+				goto readpage_eio;
 			}
 			unlock_page(page);
 		}
 
 		goto page_ok;
 
+readpage_eio:
+		error = -EIO;
 readpage_error:
 		/* UHHUH! A synchronous read error occurred. Report it */
 		desc->error = error;
@@ -385,7 +385,6 @@ rpcauth_bindcred(struct rpc_task *task)
 		.group_info = current->group_info,
 	};
 	struct rpc_cred *ret;
-	sigset_t oldset;
 	int flags = 0;
 
 	dprintk("RPC: %5u looking up %s cred\n",
@@ -393,9 +392,7 @@ rpcauth_bindcred(struct rpc_task *task)
 	get_group_info(acred.group_info);
 	if (task->tk_flags & RPC_TASK_ROOTCREDS)
 		flags |= RPCAUTH_LOOKUP_ROOTCREDS;
-	rpc_clnt_sigmask(task->tk_client, &oldset);
 	ret = auth->au_ops->lookup_cred(auth, &acred, flags);
-	rpc_clnt_sigunmask(task->tk_client, &oldset);
 	if (!IS_ERR(ret))
 		task->tk_msg.rpc_cred = ret;
 	else
@@ -313,7 +313,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
 		return clnt;
 
 	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
-		int err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
+		int err = rpc_ping(clnt, RPC_TASK_SOFT);
 		if (err != 0) {
 			rpc_shutdown_client(clnt);
 			return ERR_PTR(err);
@@ -324,8 +324,6 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
 	if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
 		clnt->cl_softrtry = 0;
 
-	if (args->flags & RPC_CLNT_CREATE_INTR)
-		clnt->cl_intr = 1;
 	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
 		clnt->cl_autobind = 1;
 	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
@@ -493,7 +491,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
 	clnt->cl_prog = program->number;
 	clnt->cl_vers = version->number;
 	clnt->cl_stats = program->stats;
-	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
+	err = rpc_ping(clnt, RPC_TASK_SOFT);
 	if (err != 0) {
 		rpc_shutdown_client(clnt);
 		clnt = ERR_PTR(err);
@@ -515,46 +513,6 @@ static const struct rpc_call_ops rpc_default_ops = {
 	.rpc_call_done = rpc_default_callback,
 };
 
-/*
- * Export the signal mask handling for synchronous code that
- * sleeps on RPC calls
- */
-#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))
-
-static void rpc_save_sigmask(sigset_t *oldset, int intr)
-{
-	unsigned long sigallow = sigmask(SIGKILL);
-	sigset_t sigmask;
-
-	/* Block all signals except those listed in sigallow */
-	if (intr)
-		sigallow |= RPC_INTR_SIGNALS;
-	siginitsetinv(&sigmask, sigallow);
-	sigprocmask(SIG_BLOCK, &sigmask, oldset);
-}
-
-static void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
-{
-	rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
-}
-
-static void rpc_restore_sigmask(sigset_t *oldset)
-{
-	sigprocmask(SIG_SETMASK, oldset, NULL);
-}
-
-void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
-{
-	rpc_save_sigmask(oldset, clnt->cl_intr);
-}
-EXPORT_SYMBOL_GPL(rpc_clnt_sigmask);
-
-void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
-{
-	rpc_restore_sigmask(oldset);
-}
-EXPORT_SYMBOL_GPL(rpc_clnt_sigunmask);
-
 /**
  * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
  * @task_setup_data: pointer to task initialisation data
@@ -562,7 +520,6 @@ EXPORT_SYMBOL_GPL(rpc_clnt_sigunmask);
 struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
 {
 	struct rpc_task *task, *ret;
-	sigset_t oldset;
 
 	task = rpc_new_task(task_setup_data);
 	if (task == NULL) {
@@ -578,13 +535,7 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
 		goto out;
 	}
 	atomic_inc(&task->tk_count);
-	/* Mask signals on synchronous RPC calls and RPCSEC_GSS upcalls */
-	if (!RPC_IS_ASYNC(task)) {
-		rpc_task_sigmask(task, &oldset);
-		rpc_execute(task);
-		rpc_restore_sigmask(&oldset);
-	} else
-		rpc_execute(task);
+	rpc_execute(task);
 	ret = task;
 out:
 	return ret;
@@ -120,8 +120,7 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
 		.program = &rpcb_program,
 		.version = version,
 		.authflavor = RPC_AUTH_UNIX,
-		.flags = (RPC_CLNT_CREATE_NOPING |
-			  RPC_CLNT_CREATE_INTR),
+		.flags = RPC_CLNT_CREATE_NOPING,
 	};
 
 	switch (srvaddr->sa_family) {
@@ -245,9 +245,9 @@ void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
 }
 EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
 
-static int rpc_wait_bit_interruptible(void *word)
+static int rpc_wait_bit_killable(void *word)
 {
-	if (signal_pending(current))
+	if (fatal_signal_pending(current))
 		return -ERESTARTSYS;
 	schedule();
 	return 0;
@@ -299,9 +299,9 @@ static void rpc_mark_complete_task(struct rpc_task *task)
 int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
 {
 	if (action == NULL)
-		action = rpc_wait_bit_interruptible;
+		action = rpc_wait_bit_killable;
 	return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
-			action, TASK_INTERRUPTIBLE);
+			action, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
 
@@ -696,10 +696,9 @@ static void __rpc_execute(struct rpc_task *task)
 
 		/* sync task: sleep here */
 		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
-		/* Note: Caller should be using rpc_clnt_sigmask() */
 		status = out_of_line_wait_on_bit(&task->tk_runstate,
-				RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
-				TASK_INTERRUPTIBLE);
+				RPC_TASK_QUEUED, rpc_wait_bit_killable,
+				TASK_KILLABLE);
 		if (status == -ERESTARTSYS) {
 			/*
 			 * When a sync task receives a signal, it exits with
@@ -840,8 +839,6 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
 		kref_get(&task->tk_client->cl_kref);
 		if (task->tk_client->cl_softrtry)
 			task->tk_flags |= RPC_TASK_SOFT;
-		if (!task->tk_client->cl_intr)
-			task->tk_flags |= RPC_TASK_NOINTR;
 	}
 
 	if (task->tk_ops->rpc_call_prepare != NULL)
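Finally, for reference, a sketch of how code that previously blocked signals around an interruptible sleep can use the killable primitives this series adds; the wait queue, condition, and function name here are illustrative only, not from this patch:

#include <linux/wait.h>
#include <linux/errno.h>

/* Hypothetical example: sleep until *done becomes true. Only a fatal
 * signal (SIGKILL) can interrupt the wait, so no sigmask juggling is needed.
 */
static int example_wait_done(wait_queue_head_t *wq, int *done)
{
	int err = wait_event_killable(*wq, *done);

	return err ? -EINTR : 0;	/* err is -ERESTARTSYS on fatal signal */
}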