Merge branch 'task_killable' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc

* 'task_killable' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc: (22 commits)
  Remove commented-out code copied from NFS
  NFS: Switch from intr mount option to TASK_KILLABLE
  Add wait_for_completion_killable
  Add wait_event_killable
  Add schedule_timeout_killable
  Use mutex_lock_killable in vfs_readdir
  Add mutex_lock_killable
  Use lock_page_killable
  Add lock_page_killable
  Add fatal_signal_pending
  Add TASK_WAKEKILL
  exit: Use task_is_*
  signal: Use task_is_*
  sched: Use task_contributes_to_load, TASK_ALL and TASK_NORMAL
  ptrace: Use task_is_*
  power: Use task_is_*
  wait: Use TASK_NORMAL
  proc/base.c: Use task_is_*
  proc/array.c: Use TASK_REPORT
  perfmon: Use task_is_*
  ...

Fixed up conflicts in NFS/sunrpc manually..
commit 75659ca0c1
38 changed files with 282 additions and 252 deletions
@@ -44,6 +44,7 @@ static inline void init_completion(struct completion *x)
 
 extern void wait_for_completion(struct completion *);
 extern int wait_for_completion_interruptible(struct completion *x);
+extern int wait_for_completion_killable(struct completion *x);
 extern unsigned long wait_for_completion_timeout(struct completion *x,
 						   unsigned long timeout);
 extern unsigned long wait_for_completion_interruptible_timeout(
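Below the hunk, a minimal sketch of how a caller might use the new wait_for_completion_killable(); the my_dev structure and helper names are hypothetical, only the completion API itself comes from the header above.

#include <linux/completion.h>
#include <linux/errno.h>

/* Hypothetical driver context; init_completion(&dev->done) is assumed
 * to have been run at setup time. */
struct my_dev {
	struct completion done;
};

static int my_dev_wait(struct my_dev *dev)
{
	/* Sleeps in TASK_KILLABLE: ordinary signals are ignored, but a
	 * fatal signal makes this return nonzero instead of hanging. */
	if (wait_for_completion_killable(&dev->done))
		return -ERESTARTSYS;
	return 0;
}

/* Completion side, e.g. called from an interrupt handler. */
static void my_dev_done(struct my_dev *dev)
{
	complete(&dev->done);
}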
@@ -125,15 +125,20 @@ static inline int fastcall mutex_is_locked(struct mutex *lock)
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
 					unsigned int subclass);
+extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
+					unsigned int subclass);
 
 #define mutex_lock(lock) mutex_lock_nested(lock, 0)
 #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
+#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
 #else
 extern void fastcall mutex_lock(struct mutex *lock);
 extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
+extern int __must_check fastcall mutex_lock_killable(struct mutex *lock);
 
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
+# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
 #endif
 
 /*
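A sketch of the calling convention the new lock variant expects, in the spirit of the vfs_readdir conversion listed in the merge summary; my_lock and my_op are hypothetical names.

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(my_lock);

static int my_op(void)
{
	/* Unlike mutex_lock(), this returns nonzero (-EINTR) if a fatal
	 * signal arrives while waiting, so the error must be propagated. */
	int err = mutex_lock_killable(&my_lock);
	if (err)
		return err;

	/* ... critical section ... */

	mutex_unlock(&my_lock);
	return 0;
}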
@@ -556,14 +556,7 @@ extern void * nfs_root_data(void);
 
 #define nfs_wait_event(clnt, wq, condition)				\
 ({									\
-	int		__retval = 0;					\
-	if (clnt->cl_intr) {						\
-		sigset_t	oldmask;				\
-		rpc_clnt_sigmask(clnt, &oldmask);			\
-		__retval = wait_event_interruptible(wq, condition);	\
-		rpc_clnt_sigunmask(clnt, &oldmask);			\
-	} else								\
-		wait_event(wq, condition);				\
+	int		__retval = wait_event_killable(wq, condition);	\
 	__retval;							\
 })
 
@@ -48,7 +48,7 @@ struct nfs_mount_data {
 /* bits in the flags field */
 
 #define NFS_MOUNT_SOFT		0x0001	/* 1 */
-#define NFS_MOUNT_INTR		0x0002	/* 1 */
+#define NFS_MOUNT_INTR		0x0002	/* 1 */ /* now unused, but ABI */
 #define NFS_MOUNT_SECURE	0x0004	/* 1 */
 #define NFS_MOUNT_POSIX		0x0008	/* 1 */
 #define NFS_MOUNT_NOCTO		0x0010	/* 1 */
@@ -157,6 +157,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 }
 
 extern void FASTCALL(__lock_page(struct page *page));
+extern int FASTCALL(__lock_page_killable(struct page *page));
 extern void FASTCALL(__lock_page_nosync(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
@@ -170,6 +171,19 @@ static inline void lock_page(struct page *page)
 		__lock_page(page);
 }
 
+/*
+ * lock_page_killable is like lock_page but can be interrupted by fatal
+ * signals.  It returns 0 if it locked the page and -EINTR if it was
+ * killed while waiting.
+ */
+static inline int lock_page_killable(struct page *page)
+{
+	might_sleep();
+	if (TestSetPageLocked(page))
+		return __lock_page_killable(page);
+	return 0;
+}
+
 /*
  * lock_page_nosync should only be used if we can't pin the page's inode.
  * Doesn't play quite so well with block device plugging.
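A sketch of how a read path might use lock_page_killable() instead of lock_page(); the helper and the error handling around it are illustrative, not taken from a particular file in this merge.

#include <linux/pagemap.h>
#include <linux/errno.h>

/* Hypothetical helper: wait for the page lock, but give up if the task
 * is killed rather than sitting in D state on a dead server. */
static int my_lock_and_check(struct page *page)
{
	int err = lock_page_killable(page);
	if (err)
		return err;	/* -EINTR: fatal signal while waiting */

	if (!PageUptodate(page)) {
		unlock_page(page);
		return -EIO;
	}
	unlock_page(page);
	return 0;
}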
@@ -172,13 +172,35 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define TASK_RUNNING		0
 #define TASK_INTERRUPTIBLE	1
 #define TASK_UNINTERRUPTIBLE	2
-#define TASK_STOPPED		4
-#define TASK_TRACED		8
+#define __TASK_STOPPED		4
+#define __TASK_TRACED		8
 /* in tsk->exit_state */
 #define EXIT_ZOMBIE		16
 #define EXIT_DEAD		32
 /* in tsk->state again */
 #define TASK_DEAD		64
+#define TASK_WAKEKILL		128
+
+/* Convenience macros for the sake of set_task_state */
+#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
+#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
+#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
+
+/* Convenience macros for the sake of wake_up */
+#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
+#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
+
+/* get_task_state() */
+#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
+				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
+				 __TASK_TRACED)
+
+#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
+#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
+#define task_is_stopped_or_traced(task)	\
+			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+#define task_contributes_to_load(task)	\
+				((task->state & TASK_UNINTERRUPTIBLE) != 0)
 
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
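The point of splitting out __TASK_STOPPED/__TASK_TRACED and adding TASK_WAKEKILL is that raw comparisons against the old composite values become fragile, which is what the task_is_*() helpers and the "Use task_is_*" commits in the summary address. A rough before/after sketch of that conversion (the function names are hypothetical):

#include <linux/sched.h>

/* Old style: compare against the exact composite state value.  This is
 * fragile once TASK_STOPPED/TASK_TRACED also carry the TASK_WAKEKILL bit. */
static int old_style(struct task_struct *p)
{
	return p->state == TASK_TRACED || p->state == TASK_STOPPED;
}

/* New style: test only the relevant __TASK_* bits via the helpers. */
static int new_style(struct task_struct *p)
{
	return task_is_stopped_or_traced(p);
}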
@@ -302,6 +324,7 @@ extern int in_sched_functions(unsigned long addr);
 #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
 extern signed long FASTCALL(schedule_timeout(signed long timeout));
 extern signed long schedule_timeout_interruptible(signed long timeout);
+extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
 
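A small sketch of a timed killable sleep with the new helper; the backoff function is hypothetical.

#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Hypothetical retry backoff: sleep up to five seconds in TASK_KILLABLE.
 * schedule_timeout_killable() sets the task state itself and returns the
 * jiffies remaining, so a nonzero result means the sleep ended early,
 * for instance because a fatal signal arrived. */
static int my_backoff(void)
{
	if (schedule_timeout_killable(5 * HZ))
		return -EINTR;
	return 0;
}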
@@ -1892,7 +1915,14 @@ static inline int signal_pending(struct task_struct *p)
 {
 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
 }
 
+extern int FASTCALL(__fatal_signal_pending(struct task_struct *p));
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+	return signal_pending(p) && __fatal_signal_pending(p);
+}
+
 static inline int need_resched(void)
 {
 	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
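fatal_signal_pending() is the check that makes an open-coded TASK_KILLABLE sleep safe to leave early; the sketch below mirrors the __wait_event_killable() loop added to wait.h later in this merge, with a placeholder waitqueue and condition.

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/errno.h>

/* Placeholder waitqueue and condition. */
static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_flag;

static int my_wait(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		prepare_to_wait(&my_wq, &wait, TASK_KILLABLE);
		if (my_flag)
			break;
		if (fatal_signal_pending(current)) {
			/* Only a fatal signal ends the sleep early. */
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	finish_wait(&my_wq, &wait);
	return ret;
}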
@@ -41,7 +41,6 @@ struct rpc_clnt {
 	struct rpc_iostats *	cl_metrics;	/* per-client statistics */
 
 	unsigned int		cl_softrtry : 1,/* soft timeouts */
-				cl_intr     : 1,/* interruptible */
 				cl_discrtry : 1,/* disconnect before retry */
 				cl_autobind : 1;/* use getport() */
 
@@ -111,7 +110,6 @@ struct rpc_create_args {
 
 /* Values for "flags" field */
 #define RPC_CLNT_CREATE_HARDRTRY	(1UL << 0)
-#define RPC_CLNT_CREATE_INTR		(1UL << 1)
 #define RPC_CLNT_CREATE_AUTOBIND	(1UL << 2)
 #define RPC_CLNT_CREATE_NONPRIVPORT	(1UL << 3)
 #define RPC_CLNT_CREATE_NOPING		(1UL << 4)
@@ -137,8 +135,6 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg,
 struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
 			       int flags);
 void		rpc_restart_call(struct rpc_task *);
-void		rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset);
-void		rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset);
 void		rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
 size_t		rpc_max_payload(struct rpc_clnt *);
 void		rpc_force_rebind(struct rpc_clnt *);
@@ -137,7 +137,6 @@ struct rpc_task_setup {
 #define RPC_TASK_DYNAMIC	0x0080		/* task was kmalloc'ed */
 #define RPC_TASK_KILLED		0x0100		/* task was killed */
 #define RPC_TASK_SOFT		0x0200		/* Use soft timeouts */
-#define RPC_TASK_NOINTR		0x0400		/* uninterruptible task */
 
 #define RPC_IS_ASYNC(t)		((t)->tk_flags & RPC_TASK_ASYNC)
 #define RPC_IS_SWAPPER(t)	((t)->tk_flags & RPC_TASK_SWAPPER)
@@ -145,7 +144,6 @@ struct rpc_task_setup {
 #define RPC_ASSASSINATED(t)	((t)->tk_flags & RPC_TASK_KILLED)
 #define RPC_DO_CALLBACK(t)	((t)->tk_callback != NULL)
 #define RPC_IS_SOFT(t)		((t)->tk_flags & RPC_TASK_SOFT)
-#define RPC_TASK_UNINTERRUPTIBLE(t)	((t)->tk_flags & RPC_TASK_NOINTR)
 
 #define RPC_TASK_RUNNING	0
 #define RPC_TASK_QUEUED		1
@@ -152,14 +152,15 @@ int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
 int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
 wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
 
-#define wake_up(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
-#define wake_up_nr(x, nr)		__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
-#define wake_up_all(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
+#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
+#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
+#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
+#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL)
+
 #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
 #define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
-#define wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
-#define wake_up_interruptible_sync(x)	__wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
 
 #define __wait_event(wq, condition) 					\
 do {									\
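One practical effect of switching the wake_up() family to TASK_NORMAL: a TASK_KILLABLE sleeper is still reached by a plain wake_up(), because a wakeup only needs the sleeper's state to share a bit with the wake mask, and TASK_KILLABLE contains TASK_UNINTERRUPTIBLE. A paraphrased illustration of that bit test (not the scheduler's actual code):

#include <linux/sched.h>

/* Paraphrase of the wakeup state test: a sleeper is woken when its
 * state shares at least one bit with the waker's mask. */
static int would_wake(long sleeper_state, long wake_mask)
{
	return (sleeper_state & wake_mask) != 0;
}

/*
 * would_wake(TASK_KILLABLE, TASK_NORMAL)      -> true
 *     (TASK_KILLABLE includes TASK_UNINTERRUPTIBLE)
 * would_wake(TASK_INTERRUPTIBLE, TASK_NORMAL) -> true
 * would_wake(__TASK_STOPPED, TASK_NORMAL)     -> false
 *     (stopped/traced tasks need a TASK_ALL or targeted wakeup)
 */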
@@ -345,6 +346,47 @@ do {									\
 	__ret;								\
 })
 
+#define __wait_event_killable(wq, condition, ret)			\
+do {									\
+	DEFINE_WAIT(__wait);						\
+									\
+	for (;;) {							\
+		prepare_to_wait(&wq, &__wait, TASK_KILLABLE);		\
+		if (condition)						\
+			break;						\
+		if (!fatal_signal_pending(current)) {			\
+			schedule();					\
+			continue;					\
+		}							\
+		ret = -ERESTARTSYS;					\
+		break;							\
+	}								\
+	finish_wait(&wq, &__wait);					\
+} while (0)
+
+/**
+ * wait_event_killable - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_KILLABLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_killable(wq, condition)				\
+({									\
+	int __ret = 0;							\
+	if (!(condition))						\
+		__wait_event_killable(wq, condition, __ret);		\
+	__ret;								\
+})
+
 /*
  * Must be called with the spinlock in the wait_queue_head_t held.
  */
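Finally, a sketch of the producer/consumer pairing the new macro is meant for, along the lines of the nfs_wait_event() conversion earlier in this diff; the names are hypothetical.

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(reply_wq);
static int reply_ready;

/* Consumer: sleeps in TASK_KILLABLE until reply_ready is set.
 * Returns 0 on success, -ERESTARTSYS if a fatal signal arrived. */
static int wait_for_reply(void)
{
	return wait_event_killable(reply_wq, reply_ready);
}

/* Producer: as the kerneldoc above says, wake_up() must be called
 * after changing any variable the wait condition depends on. */
static void post_reply(void)
{
	reply_ready = 1;
	wake_up(&reply_wq);
}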