commit 15dd859cac

Merge commit 'v2.6.27-rc1' into x86/core

Conflicts:
	include/asm-x86/dma-mapping.h
	include/asm-x86/namei.h
	include/asm-x86/uaccess.h

Signed-off-by: Ingo Molnar <mingo@elte.hu>

2069 changed files with 68780 additions and 24005 deletions
kernel/Makefile

@@ -84,6 +84,7 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
obj-$(CONFIG_MARKERS) += marker.o
obj-$(CONFIG_LATENCYTOP) += latencytop.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
obj-$(CONFIG_FTRACE) += trace/
obj-$(CONFIG_TRACING) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
kernel/cgroup.c

@@ -45,6 +45,7 @@
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hash.h>
#include <linux/namei.h>

#include <asm/atomic.h>

@@ -1529,7 +1530,7 @@ static int cgroup_seqfile_show(struct seq_file *m, void *arg)
	return cft->read_seq_string(state->cgroup, cft, m);
}

int cgroup_seqfile_release(struct inode *inode, struct file *file)
static int cgroup_seqfile_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	kfree(seq->private);
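(Illustrative sketch, not from the merge.) The cgroup.c hunk above only changes the linkage of cgroup_seqfile_release(); for readers without the surrounding file, the sketch below shows the generic seq_file open/show/release pairing such a function belongs to, assuming single_open()/single_release() are used. All demo_* names are hypothetical and not taken from cgroup.c.

#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

/* Hypothetical per-open state, playing the role of cgroup_seqfile_state. */
struct demo_seqfile_state {
	int id;
};

static int demo_seqfile_show(struct seq_file *m, void *arg)
{
	struct demo_seqfile_state *state = m->private;

	seq_printf(m, "id: %d\n", state->id);
	return 0;
}

static int demo_seqfile_open(struct inode *inode, struct file *file)
{
	struct demo_seqfile_state *state;
	int err;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	/* single_open() stores @state in the seq_file's ->private field. */
	err = single_open(file, demo_seqfile_show, state);
	if (err)
		kfree(state);
	return err;
}

/* Like cgroup_seqfile_release(): file-local, so static is the right linkage. */
static int demo_seqfile_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;

	kfree(seq->private);
	return single_release(inode, file);
}

static const struct file_operations demo_seqfile_fops = {
	.open		= demo_seqfile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= demo_seqfile_release,
};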
kernel/cpu.c (41 lines changed)

@@ -216,7 +216,6 @@ static int __ref take_cpu_down(void *_param)
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	struct task_struct *p;
	cpumask_t old_allowed, tmp;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

@@ -249,21 +248,18 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
	cpus_setall(tmp);
	cpu_clear(cpu, tmp);
	set_cpus_allowed_ptr(current, &tmp);
	tmp = cpumask_of_cpu(cpu);

	p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);

	if (IS_ERR(p) || cpu_online(cpu)) {
	err = __stop_machine(take_cpu_down, &tcd_param, &tmp);
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();

		if (IS_ERR(p)) {
			err = PTR_ERR(p);
			goto out_allowed;
		}
		goto out_thread;
		goto out_allowed;
	}
	BUG_ON(cpu_online(cpu));

	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))

@@ -279,8 +275,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)

	check_for_tasks(cpu);

out_thread:
	err = kthread_stop(p);
out_allowed:
	set_cpus_allowed_ptr(current, &old_allowed);
out_release:

@@ -461,3 +455,28 @@ out:
#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of_cpu() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
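(Illustrative sketch, not from the merge.) The cpu_bit_bitmap[] comment above is terse, so here is a standalone user-space sketch of the same trick: row (1 + cpu % BITS_PER_LONG) has a single bit set in its word 0, and backing the row pointer up by (cpu / BITS_PER_LONG) words yields a mask with only bit "cpu" set; row 0 is the all-zero pad being backed into. NR_CPUS, the demo value 70 and the helper name are assumptions; the table and macros are copied from the hunk, and the lookup mirrors what cpumask_of_cpu()/get_cpu_mask() do in the kernel headers.

#include <stdio.h>
#include <limits.h>

#define NR_CPUS		128
#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

#define MASK_DECLARE_1(x)	[x+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

static const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if ULONG_MAX > 0xffffffffUL
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};

/* Same lookup the kernel's get_cpu_mask() does; like the kernel, it relies
 * on the rows of the table being laid out contiguously in memory. */
static const unsigned long *single_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	return p - cpu / BITS_PER_LONG;
}

int main(void)
{
	unsigned int cpu = 70;			/* any value < NR_CPUS */
	const unsigned long *mask = single_cpu_mask(cpu);
	unsigned int i;

	/* Prints a mask with only bit 70 set (word 1, bit 6 on 64-bit). */
	for (i = 0; i < BITS_TO_LONGS(NR_CPUS); i++)
		printf("word %u: %#lx\n", i, mask[i]);
	return 0;
}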
kernel/dma-coherent.c (new file, 154 lines)

@@ -0,0 +1,154 @@
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;
	u32		device_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pos, err;
	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);

	pages >>= PAGE_SHIFT;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

/**
 * Try to allocate memory from the per-device coherent area.
 *
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		to allocated area.
 *
 * This function should be only called from per-arch %dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return %ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			*ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(*ret, 0, size);
		} else if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			*ret = NULL;
	}
	return (mem != NULL);
}

/**
 * Try to free the memory allocated from per-device coherent memory pool.
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * %dma_release_coherent() should proceed with releasing memory from
 * generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
		return 1;
	}
	return 0;
}
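(Illustrative sketch, not from the merge.) A minimal sketch of how a driver might use the API that kernel/dma-coherent.c exports, assuming the architecture's dma_alloc_coherent()/dma_free_coherent() consult the per-device pool via dma_alloc_from_coherent()/dma_release_from_coherent() as the comments above describe. The device, bus address, device address and sizes below are made up.

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

/* Hypothetical numbers: 1 MiB of device-local RAM at bus address 0x80000000,
 * which the device itself addresses starting at 0. */
#define DEMO_MEM_BUS_ADDR	0x80000000UL
#define DEMO_MEM_DEV_ADDR	0x00000000UL
#define DEMO_MEM_SIZE		0x100000

static int __devinit demo_probe(struct platform_device *pdev)
{
	void *vaddr;
	dma_addr_t dma_handle;
	int ret;

	ret = dma_declare_coherent_memory(&pdev->dev,
					  DEMO_MEM_BUS_ADDR,
					  DEMO_MEM_DEV_ADDR,
					  DEMO_MEM_SIZE,
					  DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE);
	if (!(ret & DMA_MEMORY_MAP))
		return -ENOMEM;

	/* Coherent allocations for this device are now carved out of the
	 * declared pool rather than from the generic allocator. */
	vaddr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &dma_handle,
				   GFP_KERNEL);
	if (!vaddr) {
		dma_release_declared_memory(&pdev->dev);
		return -ENOMEM;
	}

	/* ... program the device with dma_handle ... */

	dma_free_coherent(&pdev->dev, PAGE_SIZE, vaddr, dma_handle);
	dma_release_declared_memory(&pdev->dev);
	return 0;
}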
kernel/exec_domain.c

@@ -168,7 +168,6 @@ __set_personality(u_long personality)
	current->personality = personality;
	oep = current_thread_info()->exec_domain;
	current_thread_info()->exec_domain = ep;
	set_fs_altroot();

	module_put(oep->module);
	return 0;
@ -46,6 +46,7 @@
|
|||
#include <linux/resource.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/task_io_accounting_ops.h>
|
||||
#include <linux/tracehook.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/unistd.h>
|
||||
|
@ -120,18 +121,7 @@ static void __exit_signal(struct task_struct *tsk)
|
|||
sig->nivcsw += tsk->nivcsw;
|
||||
sig->inblock += task_io_get_inblock(tsk);
|
||||
sig->oublock += task_io_get_oublock(tsk);
|
||||
#ifdef CONFIG_TASK_XACCT
|
||||
sig->rchar += tsk->rchar;
|
||||
sig->wchar += tsk->wchar;
|
||||
sig->syscr += tsk->syscr;
|
||||
sig->syscw += tsk->syscw;
|
||||
#endif /* CONFIG_TASK_XACCT */
|
||||
#ifdef CONFIG_TASK_IO_ACCOUNTING
|
||||
sig->ioac.read_bytes += tsk->ioac.read_bytes;
|
||||
sig->ioac.write_bytes += tsk->ioac.write_bytes;
|
||||
sig->ioac.cancelled_write_bytes +=
|
||||
tsk->ioac.cancelled_write_bytes;
|
||||
#endif /* CONFIG_TASK_IO_ACCOUNTING */
|
||||
task_io_accounting_add(&sig->ioac, &tsk->ioac);
|
||||
sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
|
||||
sig = NULL; /* Marker for below. */
|
||||
}
|
||||
|
@ -162,27 +152,17 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
|
|||
put_task_struct(container_of(rhp, struct task_struct, rcu));
|
||||
}
|
||||
|
||||
/*
|
||||
* Do final ptrace-related cleanup of a zombie being reaped.
|
||||
*
|
||||
* Called with write_lock(&tasklist_lock) held.
|
||||
*/
|
||||
static void ptrace_release_task(struct task_struct *p)
|
||||
{
|
||||
BUG_ON(!list_empty(&p->ptraced));
|
||||
ptrace_unlink(p);
|
||||
BUG_ON(!list_empty(&p->ptrace_entry));
|
||||
}
|
||||
|
||||
void release_task(struct task_struct * p)
|
||||
{
|
||||
struct task_struct *leader;
|
||||
int zap_leader;
|
||||
repeat:
|
||||
tracehook_prepare_release_task(p);
|
||||
atomic_dec(&p->user->processes);
|
||||
proc_flush_task(p);
|
||||
write_lock_irq(&tasklist_lock);
|
||||
ptrace_release_task(p);
|
||||
tracehook_finish_release_task(p);
|
||||
__exit_signal(p);
|
||||
|
||||
/*
|
||||
|
@ -204,6 +184,13 @@ repeat:
|
|||
* that case.
|
||||
*/
|
||||
zap_leader = task_detached(leader);
|
||||
|
||||
/*
|
||||
* This maintains the invariant that release_task()
|
||||
* only runs on a task in EXIT_DEAD, just for sanity.
|
||||
*/
|
||||
if (zap_leader)
|
||||
leader->exit_state = EXIT_DEAD;
|
||||
}
|
||||
|
||||
write_unlock_irq(&tasklist_lock);
|
||||
|
@ -567,8 +554,6 @@ void put_fs_struct(struct fs_struct *fs)
|
|||
if (atomic_dec_and_test(&fs->count)) {
|
||||
path_put(&fs->root);
|
||||
path_put(&fs->pwd);
|
||||
if (fs->altroot.dentry)
|
||||
path_put(&fs->altroot);
|
||||
kmem_cache_free(fs_cachep, fs);
|
||||
}
|
||||
}
|
||||
|
@ -887,7 +872,8 @@ static void forget_original_parent(struct task_struct *father)
|
|||
*/
|
||||
static void exit_notify(struct task_struct *tsk, int group_dead)
|
||||
{
|
||||
int state;
|
||||
int signal;
|
||||
void *cookie;
|
||||
|
||||
/*
|
||||
* This does two things:
|
||||
|
@ -924,22 +910,11 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
|
|||
!capable(CAP_KILL))
|
||||
tsk->exit_signal = SIGCHLD;
|
||||
|
||||
/* If something other than our normal parent is ptracing us, then
|
||||
* send it a SIGCHLD instead of honoring exit_signal. exit_signal
|
||||
* only has special meaning to our real parent.
|
||||
*/
|
||||
if (!task_detached(tsk) && thread_group_empty(tsk)) {
|
||||
int signal = ptrace_reparented(tsk) ?
|
||||
SIGCHLD : tsk->exit_signal;
|
||||
do_notify_parent(tsk, signal);
|
||||
} else if (tsk->ptrace) {
|
||||
do_notify_parent(tsk, SIGCHLD);
|
||||
}
|
||||
signal = tracehook_notify_death(tsk, &cookie, group_dead);
|
||||
if (signal > 0)
|
||||
signal = do_notify_parent(tsk, signal);
|
||||
|
||||
state = EXIT_ZOMBIE;
|
||||
if (task_detached(tsk) && likely(!tsk->ptrace))
|
||||
state = EXIT_DEAD;
|
||||
tsk->exit_state = state;
|
||||
tsk->exit_state = signal < 0 ? EXIT_DEAD : EXIT_ZOMBIE;
|
||||
|
||||
/* mt-exec, de_thread() is waiting for us */
|
||||
if (thread_group_leader(tsk) &&
|
||||
|
@ -949,8 +924,10 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
|
|||
|
||||
write_unlock_irq(&tasklist_lock);
|
||||
|
||||
tracehook_report_death(tsk, signal, cookie, group_dead);
|
||||
|
||||
/* If the process is dead, release it - nobody will wait for it */
|
||||
if (state == EXIT_DEAD)
|
||||
if (signal < 0)
|
||||
release_task(tsk);
|
||||
}
|
||||
|
||||
|
@ -1029,10 +1006,7 @@ NORET_TYPE void do_exit(long code)
|
|||
if (unlikely(!tsk->pid))
|
||||
panic("Attempted to kill the idle task!");
|
||||
|
||||
if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
|
||||
current->ptrace_message = code;
|
||||
ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
|
||||
}
|
||||
tracehook_report_exit(&code);
|
||||
|
||||
/*
|
||||
* We're taking recursive faults here in do_exit. Safest is to just
|
||||
|
@ -1378,21 +1352,8 @@ static int wait_task_zombie(struct task_struct *p, int options,
|
|||
psig->coublock +=
|
||||
task_io_get_oublock(p) +
|
||||
sig->oublock + sig->coublock;
|
||||
#ifdef CONFIG_TASK_XACCT
|
||||
psig->rchar += p->rchar + sig->rchar;
|
||||
psig->wchar += p->wchar + sig->wchar;
|
||||
psig->syscr += p->syscr + sig->syscr;
|
||||
psig->syscw += p->syscw + sig->syscw;
|
||||
#endif /* CONFIG_TASK_XACCT */
|
||||
#ifdef CONFIG_TASK_IO_ACCOUNTING
|
||||
psig->ioac.read_bytes +=
|
||||
p->ioac.read_bytes + sig->ioac.read_bytes;
|
||||
psig->ioac.write_bytes +=
|
||||
p->ioac.write_bytes + sig->ioac.write_bytes;
|
||||
psig->ioac.cancelled_write_bytes +=
|
||||
p->ioac.cancelled_write_bytes +
|
||||
sig->ioac.cancelled_write_bytes;
|
||||
#endif /* CONFIG_TASK_IO_ACCOUNTING */
|
||||
task_io_accounting_add(&psig->ioac, &p->ioac);
|
||||
task_io_accounting_add(&psig->ioac, &sig->ioac);
|
||||
spin_unlock_irq(&p->parent->sighand->siglock);
|
||||
}
|
||||
|
||||
|
|
101
kernel/fork.c
101
kernel/fork.c
|
@ -27,6 +27,7 @@
|
|||
#include <linux/key.h>
|
||||
#include <linux/binfmts.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/mmu_notifier.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/nsproxy.h>
|
||||
#include <linux/capability.h>
|
||||
|
@ -37,6 +38,7 @@
|
|||
#include <linux/swap.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/tracehook.h>
|
||||
#include <linux/futex.h>
|
||||
#include <linux/task_io_accounting_ops.h>
|
||||
#include <linux/rcupdate.h>
|
||||
|
@ -413,6 +415,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
|
|||
|
||||
if (likely(!mm_alloc_pgd(mm))) {
|
||||
mm->def_flags = 0;
|
||||
mmu_notifier_mm_init(mm);
|
||||
return mm;
|
||||
}
|
||||
|
||||
|
@ -445,6 +448,7 @@ void __mmdrop(struct mm_struct *mm)
|
|||
BUG_ON(mm == &init_mm);
|
||||
mm_free_pgd(mm);
|
||||
destroy_context(mm);
|
||||
mmu_notifier_mm_destroy(mm);
|
||||
free_mm(mm);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__mmdrop);
|
||||
|
@ -656,13 +660,6 @@ static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
|
|||
path_get(&old->root);
|
||||
fs->pwd = old->pwd;
|
||||
path_get(&old->pwd);
|
||||
if (old->altroot.dentry) {
|
||||
fs->altroot = old->altroot;
|
||||
path_get(&old->altroot);
|
||||
} else {
|
||||
fs->altroot.mnt = NULL;
|
||||
fs->altroot.dentry = NULL;
|
||||
}
|
||||
read_unlock(&old->lock);
|
||||
}
|
||||
return fs;
|
||||
|
@ -812,12 +809,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
|
|||
sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
|
||||
sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
|
||||
sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
|
||||
#ifdef CONFIG_TASK_XACCT
|
||||
sig->rchar = sig->wchar = sig->syscr = sig->syscw = 0;
|
||||
#endif
|
||||
#ifdef CONFIG_TASK_IO_ACCOUNTING
|
||||
memset(&sig->ioac, 0, sizeof(sig->ioac));
|
||||
#endif
|
||||
task_io_accounting_init(&sig->ioac);
|
||||
sig->sum_sched_runtime = 0;
|
||||
INIT_LIST_HEAD(&sig->cpu_timers[0]);
|
||||
INIT_LIST_HEAD(&sig->cpu_timers[1]);
|
||||
|
@ -865,8 +857,7 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
|
|||
|
||||
new_flags &= ~PF_SUPERPRIV;
|
||||
new_flags |= PF_FORKNOEXEC;
|
||||
if (!(clone_flags & CLONE_PTRACE))
|
||||
p->ptrace = 0;
|
||||
new_flags |= PF_STARTING;
|
||||
p->flags = new_flags;
|
||||
clear_freeze_flag(p);
|
||||
}
|
||||
|
@ -907,7 +898,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
|
|||
struct pt_regs *regs,
|
||||
unsigned long stack_size,
|
||||
int __user *child_tidptr,
|
||||
struct pid *pid)
|
||||
struct pid *pid,
|
||||
int trace)
|
||||
{
|
||||
int retval;
|
||||
struct task_struct *p;
|
||||
|
@ -1000,13 +992,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
|
|||
p->last_switch_timestamp = 0;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_TASK_XACCT
|
||||
p->rchar = 0; /* I/O counter: bytes read */
|
||||
p->wchar = 0; /* I/O counter: bytes written */
|
||||
p->syscr = 0; /* I/O counter: read syscalls */
|
||||
p->syscw = 0; /* I/O counter: write syscalls */
|
||||
#endif
|
||||
task_io_accounting_init(p);
|
||||
task_io_accounting_init(&p->ioac);
|
||||
acct_clear_integrals(p);
|
||||
|
||||
p->it_virt_expires = cputime_zero;
|
||||
|
@ -1163,8 +1149,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
|
|||
*/
|
||||
p->group_leader = p;
|
||||
INIT_LIST_HEAD(&p->thread_group);
|
||||
INIT_LIST_HEAD(&p->ptrace_entry);
|
||||
INIT_LIST_HEAD(&p->ptraced);
|
||||
|
||||
/* Now that the task is set up, run cgroup callbacks if
|
||||
* necessary. We need to run them before the task is visible
|
||||
|
@ -1195,7 +1179,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
|
|||
p->real_parent = current->real_parent;
|
||||
else
|
||||
p->real_parent = current;
|
||||
p->parent = p->real_parent;
|
||||
|
||||
spin_lock(¤t->sighand->siglock);
|
||||
|
||||
|
@ -1237,8 +1220,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
|
|||
|
||||
if (likely(p->pid)) {
|
||||
list_add_tail(&p->sibling, &p->real_parent->children);
|
||||
if (unlikely(p->ptrace & PT_PTRACED))
|
||||
__ptrace_link(p, current->parent);
|
||||
tracehook_finish_clone(p, clone_flags, trace);
|
||||
|
||||
if (thread_group_leader(p)) {
|
||||
if (clone_flags & CLONE_NEWPID)
|
||||
|
@ -1323,29 +1305,13 @@ struct task_struct * __cpuinit fork_idle(int cpu)
|
|||
struct pt_regs regs;
|
||||
|
||||
task = copy_process(CLONE_VM, 0, idle_regs(®s), 0, NULL,
|
||||
&init_struct_pid);
|
||||
&init_struct_pid, 0);
|
||||
if (!IS_ERR(task))
|
||||
init_idle(task, cpu);
|
||||
|
||||
return task;
|
||||
}
|
||||
|
||||
static int fork_traceflag(unsigned clone_flags)
|
||||
{
|
||||
if (clone_flags & CLONE_UNTRACED)
|
||||
return 0;
|
||||
else if (clone_flags & CLONE_VFORK) {
|
||||
if (current->ptrace & PT_TRACE_VFORK)
|
||||
return PTRACE_EVENT_VFORK;
|
||||
} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
|
||||
if (current->ptrace & PT_TRACE_CLONE)
|
||||
return PTRACE_EVENT_CLONE;
|
||||
} else if (current->ptrace & PT_TRACE_FORK)
|
||||
return PTRACE_EVENT_FORK;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Ok, this is the main fork-routine.
|
||||
*
|
||||
|
@ -1380,14 +1346,14 @@ long do_fork(unsigned long clone_flags,
|
|||
}
|
||||
}
|
||||
|
||||
if (unlikely(current->ptrace)) {
|
||||
trace = fork_traceflag (clone_flags);
|
||||
if (trace)
|
||||
clone_flags |= CLONE_PTRACE;
|
||||
}
|
||||
/*
|
||||
* When called from kernel_thread, don't do user tracing stuff.
|
||||
*/
|
||||
if (likely(user_mode(regs)))
|
||||
trace = tracehook_prepare_clone(clone_flags);
|
||||
|
||||
p = copy_process(clone_flags, stack_start, regs, stack_size,
|
||||
child_tidptr, NULL);
|
||||
child_tidptr, NULL, trace);
|
||||
/*
|
||||
* Do this prior waking up the new thread - the thread pointer
|
||||
* might get invalid after that point, if the thread exits quickly.
|
||||
|
@ -1405,32 +1371,35 @@ long do_fork(unsigned long clone_flags,
|
|||
init_completion(&vfork);
|
||||
}
|
||||
|
||||
if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
|
||||
tracehook_report_clone(trace, regs, clone_flags, nr, p);
|
||||
|
||||
/*
|
||||
* We set PF_STARTING at creation in case tracing wants to
|
||||
* use this to distinguish a fully live task from one that
|
||||
* hasn't gotten to tracehook_report_clone() yet. Now we
|
||||
* clear it and set the child going.
|
||||
*/
|
||||
p->flags &= ~PF_STARTING;
|
||||
|
||||
if (unlikely(clone_flags & CLONE_STOPPED)) {
|
||||
/*
|
||||
* We'll start up with an immediate SIGSTOP.
|
||||
*/
|
||||
sigaddset(&p->pending.signal, SIGSTOP);
|
||||
set_tsk_thread_flag(p, TIF_SIGPENDING);
|
||||
}
|
||||
|
||||
if (!(clone_flags & CLONE_STOPPED))
|
||||
wake_up_new_task(p, clone_flags);
|
||||
else
|
||||
__set_task_state(p, TASK_STOPPED);
|
||||
|
||||
if (unlikely (trace)) {
|
||||
current->ptrace_message = nr;
|
||||
ptrace_notify ((trace << 8) | SIGTRAP);
|
||||
} else {
|
||||
wake_up_new_task(p, clone_flags);
|
||||
}
|
||||
|
||||
tracehook_report_clone_complete(trace, regs,
|
||||
clone_flags, nr, p);
|
||||
|
||||
if (clone_flags & CLONE_VFORK) {
|
||||
freezer_do_not_count();
|
||||
wait_for_completion(&vfork);
|
||||
freezer_count();
|
||||
if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) {
|
||||
current->ptrace_message = nr;
|
||||
ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
|
||||
}
|
||||
tracehook_report_vfork_done(p, nr);
|
||||
}
|
||||
} else {
|
||||
nr = PTR_ERR(p);
|
||||
|
@ -1442,7 +1411,7 @@ long do_fork(unsigned long clone_flags,
|
|||
#define ARCH_MIN_MMSTRUCT_ALIGN 0
|
||||
#endif
|
||||
|
||||
static void sighand_ctor(struct kmem_cache *cachep, void *data)
|
||||
static void sighand_ctor(void *data)
|
||||
{
|
||||
struct sighand_struct *sighand = data;
|
||||
|
||||
|
|
|
kernel/irq/chip.c

@@ -28,8 +28,7 @@ void dynamic_irq_init(unsigned int irq)
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
		WARN_ON(1);
		WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
		return;
	}

@@ -62,8 +61,7 @@ void dynamic_irq_cleanup(unsigned int irq)
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
		WARN_ON(1);
		WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
		return;
	}

@@ -71,9 +69,8 @@ void dynamic_irq_cleanup(unsigned int irq)
	spin_lock_irqsave(&desc->lock, flags);
	if (desc->action) {
		spin_unlock_irqrestore(&desc->lock, flags);
		printk(KERN_ERR "Destroying IRQ%d without calling free_irq\n",
		WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
			irq);
		WARN_ON(1);
		return;
	}
	desc->msi_desc = NULL;

@@ -96,8 +93,7 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
		WARN_ON(1);
		WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
		return -EINVAL;
	}

kernel/irq/manage.c

@@ -184,8 +184,7 @@ static void __enable_irq(struct irq_desc *desc, unsigned int irq)
{
	switch (desc->depth) {
	case 0:
		printk(KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		WARN_ON(1);
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;
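(Illustrative sketch, not from the merge.) The irq hunks above all replace a printk() plus WARN_ON(1) pair with a single WARN(1, ...), so the message and the backtrace come out together. The same conversion in a hypothetical helper:

#include <linux/kernel.h>

/* Old style: two calls, message and backtrace are separate. */
static int demo_claim_line_old(unsigned int line, unsigned int nr_lines)
{
	if (line >= nr_lines) {
		printk(KERN_ERR "demo: trying to claim invalid line %u\n", line);
		WARN_ON(1);
		return -EINVAL;
	}
	return 0;
}

/* New style, as in the hunks above: one WARN() with the formatted message. */
static int demo_claim_line(unsigned int line, unsigned int nr_lines)
{
	if (line >= nr_lines) {
		WARN(1, KERN_ERR "demo: trying to claim invalid line %u\n", line);
		return -EINVAL;
	}
	return 0;
}

WARN() also evaluates to its condition, so the test can be folded into the WARN() itself; the hunks above keep the explicit if, and so does the sketch.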
|
|
104
kernel/kexec.c
104
kernel/kexec.c
|
@ -24,6 +24,12 @@
|
|||
#include <linux/utsrelease.h>
|
||||
#include <linux/utsname.h>
|
||||
#include <linux/numa.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/freezer.h>
|
||||
#include <linux/pm.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/console.h>
|
||||
|
||||
#include <asm/page.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
@ -242,6 +248,12 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
|
|||
goto out;
|
||||
}
|
||||
|
||||
image->swap_page = kimage_alloc_control_pages(image, 0);
|
||||
if (!image->swap_page) {
|
||||
printk(KERN_ERR "Could not allocate swap buffer\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
result = 0;
|
||||
out:
|
||||
if (result == 0)
|
||||
|
@ -589,14 +601,12 @@ static void kimage_free_extra_pages(struct kimage *image)
|
|||
kimage_free_page_list(&image->unuseable_pages);
|
||||
|
||||
}
|
||||
static int kimage_terminate(struct kimage *image)
|
||||
static void kimage_terminate(struct kimage *image)
|
||||
{
|
||||
if (*image->entry != 0)
|
||||
image->entry++;
|
||||
|
||||
*image->entry = IND_DONE;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define for_each_kimage_entry(image, ptr, entry) \
|
||||
|
@ -988,6 +998,8 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
|
|||
if (result)
|
||||
goto out;
|
||||
|
||||
if (flags & KEXEC_PRESERVE_CONTEXT)
|
||||
image->preserve_context = 1;
|
||||
result = machine_kexec_prepare(image);
|
||||
if (result)
|
||||
goto out;
|
||||
|
@ -997,9 +1009,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
|
|||
if (result)
|
||||
goto out;
|
||||
}
|
||||
result = kimage_terminate(image);
|
||||
if (result)
|
||||
goto out;
|
||||
kimage_terminate(image);
|
||||
}
|
||||
/* Install the new kernel, and Uninstall the old */
|
||||
image = xchg(dest_image, image);
|
||||
|
@ -1415,3 +1425,85 @@ static int __init crash_save_vmcoreinfo_init(void)
|
|||
}
|
||||
|
||||
module_init(crash_save_vmcoreinfo_init)
|
||||
|
||||
/**
|
||||
* kernel_kexec - reboot the system
|
||||
*
|
||||
* Move into place and start executing a preloaded standalone
|
||||
* executable. If nothing was preloaded return an error.
|
||||
*/
|
||||
int kernel_kexec(void)
|
||||
{
|
||||
int error = 0;
|
||||
|
||||
if (xchg(&kexec_lock, 1))
|
||||
return -EBUSY;
|
||||
if (!kexec_image) {
|
||||
error = -EINVAL;
|
||||
goto Unlock;
|
||||
}
|
||||
|
||||
if (kexec_image->preserve_context) {
|
||||
#ifdef CONFIG_KEXEC_JUMP
|
||||
mutex_lock(&pm_mutex);
|
||||
pm_prepare_console();
|
||||
error = freeze_processes();
|
||||
if (error) {
|
||||
error = -EBUSY;
|
||||
goto Restore_console;
|
||||
}
|
||||
suspend_console();
|
||||
error = device_suspend(PMSG_FREEZE);
|
||||
if (error)
|
||||
goto Resume_console;
|
||||
error = disable_nonboot_cpus();
|
||||
if (error)
|
||||
goto Resume_devices;
|
||||
local_irq_disable();
|
||||
/* At this point, device_suspend() has been called,
|
||||
* but *not* device_power_down(). We *must*
|
||||
* device_power_down() now. Otherwise, drivers for
|
||||
* some devices (e.g. interrupt controllers) become
|
||||
* desynchronized with the actual state of the
|
||||
* hardware at resume time, and evil weirdness ensues.
|
||||
*/
|
||||
error = device_power_down(PMSG_FREEZE);
|
||||
if (error)
|
||||
goto Enable_irqs;
|
||||
save_processor_state();
|
||||
#endif
|
||||
} else {
|
||||
blocking_notifier_call_chain(&reboot_notifier_list,
|
||||
SYS_RESTART, NULL);
|
||||
system_state = SYSTEM_RESTART;
|
||||
device_shutdown();
|
||||
sysdev_shutdown();
|
||||
printk(KERN_EMERG "Starting new kernel\n");
|
||||
machine_shutdown();
|
||||
}
|
||||
|
||||
machine_kexec(kexec_image);
|
||||
|
||||
if (kexec_image->preserve_context) {
|
||||
#ifdef CONFIG_KEXEC_JUMP
|
||||
restore_processor_state();
|
||||
device_power_up(PMSG_RESTORE);
|
||||
Enable_irqs:
|
||||
local_irq_enable();
|
||||
enable_nonboot_cpus();
|
||||
Resume_devices:
|
||||
device_resume(PMSG_RESTORE);
|
||||
Resume_console:
|
||||
resume_console();
|
||||
thaw_processes();
|
||||
Restore_console:
|
||||
pm_restore_console();
|
||||
mutex_unlock(&pm_mutex);
|
||||
#endif
|
||||
}
|
||||
|
||||
Unlock:
|
||||
xchg(&kexec_lock, 0);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
|
|
kernel/kthread.c

@@ -176,7 +176,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu)
		return;
	}
	/* Must have done schedule() in kthread() before we set_task_cpu */
	wait_task_inactive(k);
	wait_task_inactive(k, 0);
	set_task_cpu(k, cpu);
	k->cpus_allowed = cpumask_of_cpu(cpu);
	k->rt.nr_cpus_allowed = 1;
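(Illustrative sketch, not from the merge.) The kthread_bind() change above only touches its internal wait; as a reminder of the calling side it serves, here is a sketch of the usual create/bind/wake sequence. The demo_* names are hypothetical; kthread_bind() has to run before the thread is woken for the first time, which is presumably why the implementation above can pass 0 ("any state") to wait_task_inactive().

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static int demo_thread_fn(void *data)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

/* Create a thread, pin it to @cpu before it ever runs, then start it. */
static struct task_struct *demo_start_on_cpu(unsigned int cpu)
{
	struct task_struct *task;

	task = kthread_create(demo_thread_fn, NULL, "demo/%u", cpu);
	if (IS_ERR(task))
		return task;

	kthread_bind(task, cpu);
	wake_up_process(task);
	return task;
}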
|
|
|
@ -325,18 +325,6 @@ static unsigned long find_symbol(const char *name,
|
|||
return -ENOENT;
|
||||
}
|
||||
|
||||
/* lookup symbol in given range of kernel_symbols */
|
||||
static const struct kernel_symbol *lookup_symbol(const char *name,
|
||||
const struct kernel_symbol *start,
|
||||
const struct kernel_symbol *stop)
|
||||
{
|
||||
const struct kernel_symbol *ks = start;
|
||||
for (; ks < stop; ks++)
|
||||
if (strcmp(ks->name, name) == 0)
|
||||
return ks;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Search for module by name: must hold module_mutex. */
|
||||
static struct module *find_module(const char *name)
|
||||
{
|
||||
|
@ -690,7 +678,7 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
|
|||
if (flags & O_NONBLOCK) {
|
||||
struct stopref sref = { mod, flags, forced };
|
||||
|
||||
return stop_machine_run(__try_stop_module, &sref, NR_CPUS);
|
||||
return stop_machine(__try_stop_module, &sref, NULL);
|
||||
} else {
|
||||
/* We don't need to stop the machine for this. */
|
||||
mod->state = MODULE_STATE_GOING;
|
||||
|
@ -1428,7 +1416,7 @@ static int __unlink_module(void *_mod)
|
|||
static void free_module(struct module *mod)
|
||||
{
|
||||
/* Delete from various lists */
|
||||
stop_machine_run(__unlink_module, mod, NR_CPUS);
|
||||
stop_machine(__unlink_module, mod, NULL);
|
||||
remove_notes_attrs(mod);
|
||||
remove_sect_attrs(mod);
|
||||
mod_kobject_remove(mod);
|
||||
|
@ -1703,6 +1691,19 @@ static void setup_modinfo(struct module *mod, Elf_Shdr *sechdrs,
|
|||
}
|
||||
|
||||
#ifdef CONFIG_KALLSYMS
|
||||
|
||||
/* lookup symbol in given range of kernel_symbols */
|
||||
static const struct kernel_symbol *lookup_symbol(const char *name,
|
||||
const struct kernel_symbol *start,
|
||||
const struct kernel_symbol *stop)
|
||||
{
|
||||
const struct kernel_symbol *ks = start;
|
||||
for (; ks < stop; ks++)
|
||||
if (strcmp(ks->name, name) == 0)
|
||||
return ks;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int is_exported(const char *name, const struct module *mod)
|
||||
{
|
||||
if (!mod && lookup_symbol(name, __start___ksymtab, __stop___ksymtab))
|
||||
|
@ -2196,7 +2197,7 @@ static struct module *load_module(void __user *umod,
|
|||
/* Now sew it into the lists so we can get lockdep and oops
|
||||
* info during argument parsing. Noone should access us, since
|
||||
* strong_try_module_get() will fail. */
|
||||
stop_machine_run(__link_module, mod, NR_CPUS);
|
||||
stop_machine(__link_module, mod, NULL);
|
||||
|
||||
/* Size of section 0 is 0, so this works well if no params */
|
||||
err = parse_args(mod->name, mod->args,
|
||||
|
@ -2230,7 +2231,7 @@ static struct module *load_module(void __user *umod,
|
|||
return mod;
|
||||
|
||||
unlink:
|
||||
stop_machine_run(__unlink_module, mod, NR_CPUS);
|
||||
stop_machine(__unlink_module, mod, NULL);
|
||||
module_arch_cleanup(mod);
|
||||
cleanup:
|
||||
kobject_del(&mod->mkobj.kobj);
|
||||
|
|
|
kernel/power/main.c

@@ -635,6 +635,13 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
	}
	if (status < 0)
		printk(err_suspend, status);

	/* Some platforms can't detect that the alarm triggered the
	 * wakeup, or (accordingly) disable it after it afterwards.
	 * It's supposed to give oneshot behavior; cope.
	 */
	alm.enabled = false;
	rtc_set_alarm(rtc, &alm);
}

static int __init has_wakealarm(struct device *dev, void *name_ptr)

kernel/power/power.h

@@ -53,8 +53,6 @@ extern int hibernation_platform_enter(void);

extern int pfn_is_nosave(unsigned long);

extern struct mutex pm_mutex;

#define power_attr(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr	= {				\

kernel/ptrace.c

@@ -107,7 +107,7 @@ int ptrace_check_attach(struct task_struct *child, int kill)
	read_unlock(&tasklist_lock);

	if (!ret && !kill)
		wait_task_inactive(child);
		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;

	/* All systems go.. */
	return ret;

kernel/rcuclassic.c

@@ -91,8 +91,8 @@ static void force_quiescent_state(struct rcu_data *rdp,
	 * rdp->cpu is the current cpu.
	 *
	 * cpu_online_map is updated by the _cpu_down()
	 * using stop_machine_run(). Since we're in irqs disabled
	 * section, stop_machine_run() is not exectuting, hence
	 * using __stop_machine(). Since we're in irqs disabled
	 * section, __stop_machine() is not exectuting, hence
	 * the cpu_online_map is stable.
	 *
	 * However, a cpu might have been offlined _just_ before
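(Illustrative sketch, not from the merge.) ptrace_check_attach() above now keys off the return value of wait_task_inactive(child, TASK_TRACED), while kthread_bind() passes 0 for "any state": a non-zero return is the task's context-switch count at the moment it was seen off-CPU, and 0 means the task left the expected state and the caller must back off. A hypothetical helper showing that calling convention:

#include <linux/sched.h>
#include <linux/errno.h>

/*
 * Wait until @task is definitely off its CPU while still in @state.
 * Returns 0 on success; -ESRCH if the task changed state first.
 */
static int demo_wait_off_cpu(struct task_struct *task, long state)
{
	unsigned long ncsw;

	ncsw = wait_task_inactive(task, state);
	if (!ncsw)
		return -ESRCH;	/* raced: no longer in @state */

	/* ... safe to poke at the stopped task here; a later call returning
	 * the same ncsw proves it never ran in between ... */

	return 0;
}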
||||
|
|
170
kernel/relay.c
170
kernel/relay.c
|
@ -407,6 +407,35 @@ void relay_reset(struct rchan *chan)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(relay_reset);
|
||||
|
||||
static inline void relay_set_buf_dentry(struct rchan_buf *buf,
|
||||
struct dentry *dentry)
|
||||
{
|
||||
buf->dentry = dentry;
|
||||
buf->dentry->d_inode->i_size = buf->early_bytes;
|
||||
}
|
||||
|
||||
static struct dentry *relay_create_buf_file(struct rchan *chan,
|
||||
struct rchan_buf *buf,
|
||||
unsigned int cpu)
|
||||
{
|
||||
struct dentry *dentry;
|
||||
char *tmpname;
|
||||
|
||||
tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
|
||||
if (!tmpname)
|
||||
return NULL;
|
||||
snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
|
||||
|
||||
/* Create file in fs */
|
||||
dentry = chan->cb->create_buf_file(tmpname, chan->parent,
|
||||
S_IRUSR, buf,
|
||||
&chan->is_global);
|
||||
|
||||
kfree(tmpname);
|
||||
|
||||
return dentry;
|
||||
}
|
||||
|
||||
/*
|
||||
* relay_open_buf - create a new relay channel buffer
|
||||
*
|
||||
|
@ -416,45 +445,34 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
|
|||
{
|
||||
struct rchan_buf *buf = NULL;
|
||||
struct dentry *dentry;
|
||||
char *tmpname;
|
||||
|
||||
if (chan->is_global)
|
||||
return chan->buf[0];
|
||||
|
||||
tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
|
||||
if (!tmpname)
|
||||
goto end;
|
||||
snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
|
||||
|
||||
buf = relay_create_buf(chan);
|
||||
if (!buf)
|
||||
goto free_name;
|
||||
return NULL;
|
||||
|
||||
if (chan->has_base_filename) {
|
||||
dentry = relay_create_buf_file(chan, buf, cpu);
|
||||
if (!dentry)
|
||||
goto free_buf;
|
||||
relay_set_buf_dentry(buf, dentry);
|
||||
}
|
||||
|
||||
buf->cpu = cpu;
|
||||
__relay_reset(buf, 1);
|
||||
|
||||
/* Create file in fs */
|
||||
dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR,
|
||||
buf, &chan->is_global);
|
||||
if (!dentry)
|
||||
goto free_buf;
|
||||
|
||||
buf->dentry = dentry;
|
||||
|
||||
if(chan->is_global) {
|
||||
chan->buf[0] = buf;
|
||||
buf->cpu = 0;
|
||||
}
|
||||
|
||||
goto free_name;
|
||||
return buf;
|
||||
|
||||
free_buf:
|
||||
relay_destroy_buf(buf);
|
||||
buf = NULL;
|
||||
free_name:
|
||||
kfree(tmpname);
|
||||
end:
|
||||
return buf;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -537,8 +555,8 @@ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
|
|||
|
||||
/**
|
||||
* relay_open - create a new relay channel
|
||||
* @base_filename: base name of files to create
|
||||
* @parent: dentry of parent directory, %NULL for root directory
|
||||
* @base_filename: base name of files to create, %NULL for buffering only
|
||||
* @parent: dentry of parent directory, %NULL for root directory or buffer
|
||||
* @subbuf_size: size of sub-buffers
|
||||
* @n_subbufs: number of sub-buffers
|
||||
* @cb: client callback functions
|
||||
|
@ -560,8 +578,6 @@ struct rchan *relay_open(const char *base_filename,
|
|||
{
|
||||
unsigned int i;
|
||||
struct rchan *chan;
|
||||
if (!base_filename)
|
||||
return NULL;
|
||||
|
||||
if (!(subbuf_size && n_subbufs))
|
||||
return NULL;
|
||||
|
@ -576,7 +592,10 @@ struct rchan *relay_open(const char *base_filename,
|
|||
chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs);
|
||||
chan->parent = parent;
|
||||
chan->private_data = private_data;
|
||||
strlcpy(chan->base_filename, base_filename, NAME_MAX);
|
||||
if (base_filename) {
|
||||
chan->has_base_filename = 1;
|
||||
strlcpy(chan->base_filename, base_filename, NAME_MAX);
|
||||
}
|
||||
setup_callbacks(chan, cb);
|
||||
kref_init(&chan->kref);
|
||||
|
||||
|
@ -604,6 +623,94 @@ free_bufs:
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(relay_open);
|
||||
|
||||
struct rchan_percpu_buf_dispatcher {
|
||||
struct rchan_buf *buf;
|
||||
struct dentry *dentry;
|
||||
};
|
||||
|
||||
/* Called in atomic context. */
|
||||
static void __relay_set_buf_dentry(void *info)
|
||||
{
|
||||
struct rchan_percpu_buf_dispatcher *p = info;
|
||||
|
||||
relay_set_buf_dentry(p->buf, p->dentry);
|
||||
}
|
||||
|
||||
/**
|
||||
* relay_late_setup_files - triggers file creation
|
||||
* @chan: channel to operate on
|
||||
* @base_filename: base name of files to create
|
||||
* @parent: dentry of parent directory, %NULL for root directory
|
||||
*
|
||||
* Returns 0 if successful, non-zero otherwise.
|
||||
*
|
||||
* Use to setup files for a previously buffer-only channel.
|
||||
* Useful to do early tracing in kernel, before VFS is up, for example.
|
||||
*/
|
||||
int relay_late_setup_files(struct rchan *chan,
|
||||
const char *base_filename,
|
||||
struct dentry *parent)
|
||||
{
|
||||
int err = 0;
|
||||
unsigned int i, curr_cpu;
|
||||
unsigned long flags;
|
||||
struct dentry *dentry;
|
||||
struct rchan_percpu_buf_dispatcher disp;
|
||||
|
||||
if (!chan || !base_filename)
|
||||
return -EINVAL;
|
||||
|
||||
strlcpy(chan->base_filename, base_filename, NAME_MAX);
|
||||
|
||||
mutex_lock(&relay_channels_mutex);
|
||||
/* Is chan already set up? */
|
||||
if (unlikely(chan->has_base_filename))
|
||||
return -EEXIST;
|
||||
chan->has_base_filename = 1;
|
||||
chan->parent = parent;
|
||||
curr_cpu = get_cpu();
|
||||
/*
|
||||
* The CPU hotplug notifier ran before us and created buffers with
|
||||
* no files associated. So it's safe to call relay_setup_buf_file()
|
||||
* on all currently online CPUs.
|
||||
*/
|
||||
for_each_online_cpu(i) {
|
||||
if (unlikely(!chan->buf[i])) {
|
||||
printk(KERN_ERR "relay_late_setup_files: CPU %u "
|
||||
"has no buffer, it must have!\n", i);
|
||||
BUG();
|
||||
err = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
dentry = relay_create_buf_file(chan, chan->buf[i], i);
|
||||
if (unlikely(!dentry)) {
|
||||
err = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
if (curr_cpu == i) {
|
||||
local_irq_save(flags);
|
||||
relay_set_buf_dentry(chan->buf[i], dentry);
|
||||
local_irq_restore(flags);
|
||||
} else {
|
||||
disp.buf = chan->buf[i];
|
||||
disp.dentry = dentry;
|
||||
smp_mb();
|
||||
/* relay_channels_mutex must be held, so wait. */
|
||||
err = smp_call_function_single(i,
|
||||
__relay_set_buf_dentry,
|
||||
&disp, 1);
|
||||
}
|
||||
if (unlikely(err))
|
||||
break;
|
||||
}
|
||||
put_cpu();
|
||||
mutex_unlock(&relay_channels_mutex);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* relay_switch_subbuf - switch to a new sub-buffer
|
||||
* @buf: channel buffer
|
||||
|
@ -627,8 +734,13 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
|
|||
old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
|
||||
buf->padding[old_subbuf] = buf->prev_padding;
|
||||
buf->subbufs_produced++;
|
||||
buf->dentry->d_inode->i_size += buf->chan->subbuf_size -
|
||||
buf->padding[old_subbuf];
|
||||
if (buf->dentry)
|
||||
buf->dentry->d_inode->i_size +=
|
||||
buf->chan->subbuf_size -
|
||||
buf->padding[old_subbuf];
|
||||
else
|
||||
buf->early_bytes += buf->chan->subbuf_size -
|
||||
buf->padding[old_subbuf];
|
||||
smp_mb();
|
||||
if (waitqueue_active(&buf->read_wait))
|
||||
/*
|
||||
|
@ -1237,4 +1349,4 @@ static __init int relay_init(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
module_init(relay_init);
|
||||
early_initcall(relay_init);
|
||||
|
|
|
@ -1867,16 +1867,24 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
|
|||
/*
|
||||
* wait_task_inactive - wait for a thread to unschedule.
|
||||
*
|
||||
* If @match_state is nonzero, it's the @p->state value just checked and
|
||||
* not expected to change. If it changes, i.e. @p might have woken up,
|
||||
* then return zero. When we succeed in waiting for @p to be off its CPU,
|
||||
* we return a positive number (its total switch count). If a second call
|
||||
* a short while later returns the same number, the caller can be sure that
|
||||
* @p has remained unscheduled the whole time.
|
||||
*
|
||||
* The caller must ensure that the task *will* unschedule sometime soon,
|
||||
* else this function might spin for a *long* time. This function can't
|
||||
* be called with interrupts off, or it may introduce deadlock with
|
||||
* smp_call_function() if an IPI is sent by the same process we are
|
||||
* waiting to become inactive.
|
||||
*/
|
||||
void wait_task_inactive(struct task_struct *p)
|
||||
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
|
||||
{
|
||||
unsigned long flags;
|
||||
int running, on_rq;
|
||||
unsigned long ncsw;
|
||||
struct rq *rq;
|
||||
|
||||
for (;;) {
|
||||
|
@ -1899,8 +1907,11 @@ void wait_task_inactive(struct task_struct *p)
|
|||
* return false if the runqueue has changed and p
|
||||
* is actually now running somewhere else!
|
||||
*/
|
||||
while (task_running(rq, p))
|
||||
while (task_running(rq, p)) {
|
||||
if (match_state && unlikely(p->state != match_state))
|
||||
return 0;
|
||||
cpu_relax();
|
||||
}
|
||||
|
||||
/*
|
||||
* Ok, time to look more closely! We need the rq
|
||||
|
@ -1910,8 +1921,20 @@ void wait_task_inactive(struct task_struct *p)
|
|||
rq = task_rq_lock(p, &flags);
|
||||
running = task_running(rq, p);
|
||||
on_rq = p->se.on_rq;
|
||||
ncsw = 0;
|
||||
if (!match_state || p->state == match_state) {
|
||||
ncsw = p->nivcsw + p->nvcsw;
|
||||
if (unlikely(!ncsw))
|
||||
ncsw = 1;
|
||||
}
|
||||
task_rq_unlock(rq, &flags);
|
||||
|
||||
/*
|
||||
* If it changed from the expected state, bail out now.
|
||||
*/
|
||||
if (unlikely(!ncsw))
|
||||
break;
|
||||
|
||||
/*
|
||||
* Was it really running after all now that we
|
||||
* checked with the proper locks actually held?
|
||||
|
@ -1944,6 +1967,8 @@ void wait_task_inactive(struct task_struct *p)
|
|||
*/
|
||||
break;
|
||||
}
|
||||
|
||||
return ncsw;
|
||||
}
|
||||
|
||||
/***
|
||||
|
@ -6389,7 +6414,7 @@ static struct notifier_block __cpuinitdata migration_notifier = {
|
|||
.priority = 10
|
||||
};
|
||||
|
||||
void __init migration_init(void)
|
||||
static int __init migration_init(void)
|
||||
{
|
||||
void *cpu = (void *)(long)smp_processor_id();
|
||||
int err;
|
||||
|
@ -6399,7 +6424,10 @@ void __init migration_init(void)
|
|||
BUG_ON(err == NOTIFY_BAD);
|
||||
migration_call(&migration_notifier, CPU_ONLINE, cpu);
|
||||
register_cpu_notifier(&migration_notifier);
|
||||
|
||||
return err;
|
||||
}
|
||||
early_initcall(migration_init);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include <linux/ptrace.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/signalfd.h>
|
||||
#include <linux/tracehook.h>
|
||||
#include <linux/capability.h>
|
||||
#include <linux/freezer.h>
|
||||
#include <linux/pid_namespace.h>
|
||||
|
@ -39,24 +40,21 @@
|
|||
|
||||
static struct kmem_cache *sigqueue_cachep;
|
||||
|
||||
static int __sig_ignored(struct task_struct *t, int sig)
|
||||
static void __user *sig_handler(struct task_struct *t, int sig)
|
||||
{
|
||||
void __user *handler;
|
||||
return t->sighand->action[sig - 1].sa.sa_handler;
|
||||
}
|
||||
|
||||
static int sig_handler_ignored(void __user *handler, int sig)
|
||||
{
|
||||
/* Is it explicitly or implicitly ignored? */
|
||||
|
||||
handler = t->sighand->action[sig - 1].sa.sa_handler;
|
||||
return handler == SIG_IGN ||
|
||||
(handler == SIG_DFL && sig_kernel_ignore(sig));
|
||||
}
|
||||
|
||||
static int sig_ignored(struct task_struct *t, int sig)
|
||||
{
|
||||
/*
|
||||
* Tracers always want to know about signals..
|
||||
*/
|
||||
if (t->ptrace & PT_PTRACED)
|
||||
return 0;
|
||||
void __user *handler;
|
||||
|
||||
/*
|
||||
* Blocked signals are never ignored, since the
|
||||
|
@ -66,7 +64,14 @@ static int sig_ignored(struct task_struct *t, int sig)
|
|||
if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
|
||||
return 0;
|
||||
|
||||
return __sig_ignored(t, sig);
|
||||
handler = sig_handler(t, sig);
|
||||
if (!sig_handler_ignored(handler, sig))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Tracers may want to know about even ignored signals.
|
||||
*/
|
||||
return !tracehook_consider_ignored_signal(t, sig, handler);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -129,7 +134,9 @@ void recalc_sigpending_and_wake(struct task_struct *t)
|
|||
|
||||
void recalc_sigpending(void)
|
||||
{
|
||||
if (!recalc_sigpending_tsk(current) && !freezing(current))
|
||||
if (unlikely(tracehook_force_sigpending()))
|
||||
set_thread_flag(TIF_SIGPENDING);
|
||||
else if (!recalc_sigpending_tsk(current) && !freezing(current))
|
||||
clear_thread_flag(TIF_SIGPENDING);
|
||||
|
||||
}
|
||||
|
@ -295,12 +302,12 @@ flush_signal_handlers(struct task_struct *t, int force_default)
|
|||
|
||||
int unhandled_signal(struct task_struct *tsk, int sig)
|
||||
{
|
||||
void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
|
||||
if (is_global_init(tsk))
|
||||
return 1;
|
||||
if (tsk->ptrace & PT_PTRACED)
|
||||
if (handler != SIG_IGN && handler != SIG_DFL)
|
||||
return 0;
|
||||
return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
|
||||
(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
|
||||
return !tracehook_consider_fatal_signal(tsk, sig, handler);
|
||||
}
|
||||
|
||||
|
||||
|
@ -591,9 +598,6 @@ static int check_kill_permission(int sig, struct siginfo *info,
|
|||
return security_task_kill(t, info, sig, 0);
|
||||
}
|
||||
|
||||
/* forward decl */
|
||||
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
|
||||
|
||||
/*
|
||||
* Handle magic process-wide effects of stop/continue signals. Unlike
|
||||
* the signal actions, these happen immediately at signal-generation
|
||||
|
@ -756,7 +760,8 @@ static void complete_signal(int sig, struct task_struct *p, int group)
|
|||
if (sig_fatal(p, sig) &&
|
||||
!(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
|
||||
!sigismember(&t->real_blocked, sig) &&
|
||||
(sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
|
||||
(sig == SIGKILL ||
|
||||
!tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
|
||||
/*
|
||||
* This signal will be fatal to the whole group.
|
||||
*/
|
||||
|
@ -1323,9 +1328,11 @@ static inline void __wake_up_parent(struct task_struct *p,
|
|||
/*
|
||||
* Let a parent know about the death of a child.
|
||||
* For a stopped/continued status change, use do_notify_parent_cldstop instead.
|
||||
*
|
||||
* Returns -1 if our parent ignored us and so we've switched to
|
||||
* self-reaping, or else @sig.
|
||||
*/
|
||||
|
||||
void do_notify_parent(struct task_struct *tsk, int sig)
|
||||
int do_notify_parent(struct task_struct *tsk, int sig)
|
||||
{
|
||||
struct siginfo info;
|
||||
unsigned long flags;
|
||||
|
@ -1396,12 +1403,14 @@ void do_notify_parent(struct task_struct *tsk, int sig)
|
|||
*/
|
||||
tsk->exit_signal = -1;
|
||||
if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
|
||||
sig = 0;
|
||||
sig = -1;
|
||||
}
|
||||
if (valid_signal(sig) && sig > 0)
|
||||
__group_send_sig_info(sig, &info, tsk->parent);
|
||||
__wake_up_parent(tsk, tsk->parent);
|
||||
spin_unlock_irqrestore(&psig->siglock, flags);
|
||||
|
||||
return sig;
|
||||
}
|
||||
|
||||
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
|
||||
|
@ -1599,7 +1608,7 @@ finish_stop(int stop_count)
|
|||
* a group stop in progress and we are the last to stop,
|
||||
* report to the parent. When ptraced, every thread reports itself.
|
||||
*/
|
||||
if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
|
||||
if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
|
||||
read_lock(&tasklist_lock);
|
||||
do_notify_parent_cldstop(current, CLD_STOPPED);
|
||||
read_unlock(&tasklist_lock);
|
||||
|
@ -1735,6 +1744,9 @@ relock:
|
|||
signal->flags &= ~SIGNAL_CLD_MASK;
|
||||
spin_unlock_irq(&sighand->siglock);
|
||||
|
||||
if (unlikely(!tracehook_notify_jctl(1, why)))
|
||||
goto relock;
|
||||
|
||||
read_lock(&tasklist_lock);
|
||||
do_notify_parent_cldstop(current->group_leader, why);
|
||||
read_unlock(&tasklist_lock);
|
||||
|
@ -1748,17 +1760,33 @@ relock:
|
|||
do_signal_stop(0))
|
||||
goto relock;
|
||||
|
||||
signr = dequeue_signal(current, ¤t->blocked, info);
|
||||
if (!signr)
|
||||
break; /* will return 0 */
|
||||
/*
|
||||
* Tracing can induce an artifical signal and choose sigaction.
|
||||
* The return value in @signr determines the default action,
|
||||
* but @info->si_signo is the signal number we will report.
|
||||
*/
|
||||
signr = tracehook_get_signal(current, regs, info, return_ka);
|
||||
if (unlikely(signr < 0))
|
||||
goto relock;
|
||||
if (unlikely(signr != 0))
|
||||
ka = return_ka;
|
||||
else {
|
||||
signr = dequeue_signal(current, ¤t->blocked,
|
||||
info);
|
||||
|
||||
if (signr != SIGKILL) {
|
||||
signr = ptrace_signal(signr, info, regs, cookie);
|
||||
if (!signr)
|
||||
continue;
|
||||
break; /* will return 0 */
|
||||
|
||||
if (signr != SIGKILL) {
|
||||
signr = ptrace_signal(signr, info,
|
||||
regs, cookie);
|
||||
if (!signr)
|
||||
continue;
|
||||
}
|
||||
|
||||
ka = &sighand->action[signr-1];
|
||||
}
|
||||
|
||||
ka = &sighand->action[signr-1];
|
||||
if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
|
||||
continue;
|
||||
if (ka->sa.sa_handler != SIG_DFL) {
|
||||
|
@ -1806,7 +1834,7 @@ relock:
|
|||
spin_lock_irq(&sighand->siglock);
|
||||
}
|
||||
|
||||
if (likely(do_signal_stop(signr))) {
|
||||
if (likely(do_signal_stop(info->si_signo))) {
|
||||
/* It released the siglock. */
|
||||
goto relock;
|
||||
}
|
||||
|
@ -1827,7 +1855,7 @@ relock:
|
|||
|
||||
if (sig_kernel_coredump(signr)) {
|
||||
if (print_fatal_signals)
|
||||
print_fatal_signal(regs, signr);
|
||||
print_fatal_signal(regs, info->si_signo);
|
||||
/*
|
||||
* If it was able to dump core, this kills all
|
||||
* other threads in the group and synchronizes with
|
||||
|
@ -1836,13 +1864,13 @@ relock:
|
|||
* first and our do_group_exit call below will use
|
||||
* that value and ignore the one we pass it.
|
||||
*/
|
||||
do_coredump((long)signr, signr, regs);
|
||||
do_coredump(info->si_signo, info->si_signo, regs);
|
||||
}
|
||||
|
||||
/*
|
||||
* Death signals, no core dump.
|
||||
*/
|
||||
do_group_exit(signr);
|
||||
do_group_exit(info->si_signo);
|
||||
/* NOTREACHED */
|
||||
}
|
||||
spin_unlock_irq(&sighand->siglock);
|
||||
|
@ -1884,7 +1912,7 @@ void exit_signals(struct task_struct *tsk)
|
|||
out:
|
||||
spin_unlock_irq(&tsk->sighand->siglock);
|
||||
|
||||
if (unlikely(group_stop)) {
|
||||
if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
|
||||
read_lock(&tasklist_lock);
|
||||
do_notify_parent_cldstop(tsk, CLD_STOPPED);
|
||||
read_unlock(&tasklist_lock);
|
||||
|
@ -1895,7 +1923,6 @@ EXPORT_SYMBOL(recalc_sigpending);
|
|||
EXPORT_SYMBOL_GPL(dequeue_signal);
|
||||
EXPORT_SYMBOL(flush_signals);
|
||||
EXPORT_SYMBOL(force_sig);
|
||||
EXPORT_SYMBOL(ptrace_notify);
|
||||
EXPORT_SYMBOL(send_sig);
|
||||
EXPORT_SYMBOL(send_sig_info);
|
||||
EXPORT_SYMBOL(sigprocmask);
|
||||
|
@ -2299,7 +2326,7 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
|
|||
* (for example, SIGCHLD), shall cause the pending signal to
|
||||
* be discarded, whether or not it is blocked"
|
||||
*/
|
||||
if (__sig_ignored(t, sig)) {
|
||||
if (sig_handler_ignored(sig_handler(t, sig), sig)) {
|
||||
sigemptyset(&mask);
|
||||
sigaddset(&mask, sig);
|
||||
rm_from_queue_full(&mask, &t->signal->shared_pending);
|
||||
|
|
|
kernel/smp.c

@@ -33,7 +33,7 @@ struct call_single_queue {
	spinlock_t lock;
};

void __cpuinit init_call_single_data(void)
static int __cpuinit init_call_single_data(void)
{
	int i;

@@ -43,7 +43,9 @@ void __cpuinit init_call_single_data(void)
		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}
	return 0;
}
early_initcall(init_call_single_data);

static void csd_flag_wait(struct call_single_data *data)
{
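(Illustrative sketch, not from the merge.) init_call_single_data() above is no longer called by hand from init code; it registers itself with early_initcall(), which runs from the pre-SMP initcall pass before secondary CPUs are brought up. The same pattern for a hypothetical subsystem:

#include <linux/init.h>
#include <linux/kernel.h>

/* Runs once during early boot; the early_initcall() section entry replaces
 * an explicit call from init/main.c, exactly as the hunks here do for
 * init_call_single_data() and spawn_ksoftirqd(). */
static int __init demo_early_setup(void)
{
	pr_info("demo: early init\n");
	return 0;
}
early_initcall(demo_early_setup);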
|
|
|
kernel/softirq.c

@@ -630,7 +630,7 @@ static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

__init int spawn_ksoftirqd(void)
static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

@@ -640,6 +640,7 @@ __init int spawn_ksoftirqd(void)
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);

#ifdef CONFIG_SMP
/*
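(Illustrative sketch, not from the merge.) spawn_ksoftirqd() above, and spawn_softlockup_task() below, share one shape: replay the CPU_UP_PREPARE/CPU_ONLINE hotplug events by hand for the already-running boot CPU, then register the notifier so later CPUs are handled automatically. A sketch of that shape with hypothetical names:

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/init.h>

static int __cpuinit demo_cpu_callback(struct notifier_block *nfb,
				       unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_UP_PREPARE:
		/* allocate per-cpu resources for the incoming CPU */
		break;
	case CPU_ONLINE:
		/* start the per-cpu thread for the now-online CPU */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata demo_cpu_nfb = {
	.notifier_call = demo_cpu_callback,
};

static int __init demo_spawn(void)
{
	void *cpu = (void *)(long)smp_processor_id();

	/* Boot CPU is already up: replay the events for it by hand. */
	demo_cpu_callback(&demo_cpu_nfb, CPU_UP_PREPARE, cpu);
	demo_cpu_callback(&demo_cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&demo_cpu_nfb);
	return 0;
}
early_initcall(demo_spawn);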
||||
|
|
|
kernel/softlockup.c

@@ -338,14 +338,33 @@ static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

__init void spawn_softlockup_task(void)
static int __initdata nosoftlockup;

static int __init nosoftlockup_setup(char *str)
{
	nosoftlockup = 1;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

static int __init spawn_softlockup_task(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	int err;

	BUG_ON(err == NOTIFY_BAD);
	if (nosoftlockup)
		return 0;

	err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	if (err == NOTIFY_BAD) {
		BUG();
		return 1;
	}
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	return 0;
}
early_initcall(spawn_softlockup_task);
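(Illustrative sketch, not from the merge.) The softlockup hunk adds a "nosoftlockup" boot option with __setup(). The same pattern for a hypothetical flag:

#include <linux/init.h>

static int __initdata demo_disabled;

/* Parses "nodemo" on the kernel command line; returning 1 tells the boot
 * parameter code the option has been consumed. */
static int __init nodemo_setup(char *str)
{
	demo_disabled = 1;
	return 1;
}
__setup("nodemo", nodemo_setup);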
||||
|
|
|
@@ -1,4 +1,4 @@
/* Copyright 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
/* Copyright 2008, 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
 * GPL v2 and any later version.
 */
#include <linux/cpu.h>

@@ -13,204 +13,178 @@
#include <asm/atomic.h>
#include <asm/uaccess.h>

/* Since we effect priority and affinity (both of which are visible
 * to, and settable by outside processes) we do indirection via a
 * kthread. */

/* Thread to stop each CPU in user context. */
/* This controls the threads on each CPU. */
enum stopmachine_state {
	STOPMACHINE_WAIT,
	/* Dummy starting state for thread. */
	STOPMACHINE_NONE,
	/* Awaiting everyone to be scheduled. */
	STOPMACHINE_PREPARE,
	/* Disable interrupts. */
	STOPMACHINE_DISABLE_IRQ,
	/* Run the function */
	STOPMACHINE_RUN,
	/* Exit */
	STOPMACHINE_EXIT,
};

static enum stopmachine_state stopmachine_state;
static unsigned int stopmachine_num_threads;
static atomic_t stopmachine_thread_ack;

static int stopmachine(void *cpu)
{
	int irqs_disabled = 0;
	int prepared = 0;
	cpumask_of_cpu_ptr(cpumask, (int)(long)cpu);

	set_cpus_allowed_ptr(current, cpumask);

	/* Ack: we are alive */
	smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
	atomic_inc(&stopmachine_thread_ack);

	/* Simple state machine */
	while (stopmachine_state != STOPMACHINE_EXIT) {
		if (stopmachine_state == STOPMACHINE_DISABLE_IRQ
		    && !irqs_disabled) {
			local_irq_disable();
			hard_irq_disable();
			irqs_disabled = 1;
			/* Ack: irqs disabled. */
			smp_mb(); /* Must read state first. */
			atomic_inc(&stopmachine_thread_ack);
		} else if (stopmachine_state == STOPMACHINE_PREPARE
			   && !prepared) {
			/* Everyone is in place, hold CPU. */
			preempt_disable();
			prepared = 1;
			smp_mb(); /* Must read state first. */
			atomic_inc(&stopmachine_thread_ack);
		}
		/* Yield in first stage: migration threads need to
		 * help our sisters onto their CPUs. */
		if (!prepared && !irqs_disabled)
			yield();
		cpu_relax();
	}

	/* Ack: we are exiting. */
	smp_mb(); /* Must read state first. */
	atomic_inc(&stopmachine_thread_ack);

	if (irqs_disabled)
		local_irq_enable();
	if (prepared)
		preempt_enable();

	return 0;
}

/* Change the thread state */
static void stopmachine_set_state(enum stopmachine_state state)
{
	atomic_set(&stopmachine_thread_ack, 0);
	smp_wmb();
	stopmachine_state = state;
	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
		cpu_relax();
}

static int stop_machine(void)
{
	int i, ret = 0;

	atomic_set(&stopmachine_thread_ack, 0);
	stopmachine_num_threads = 0;
	stopmachine_state = STOPMACHINE_WAIT;

	for_each_online_cpu(i) {
		if (i == raw_smp_processor_id())
			continue;
		ret = kernel_thread(stopmachine, (void *)(long)i, CLONE_KERNEL);
		if (ret < 0)
			break;
		stopmachine_num_threads++;
	}

	/* Wait for them all to come to life. */
	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads) {
		yield();
		cpu_relax();
	}

	/* If some failed, kill them all. */
	if (ret < 0) {
		stopmachine_set_state(STOPMACHINE_EXIT);
		return ret;
	}

	/* Now they are all started, make them hold the CPUs, ready. */
	preempt_disable();
	stopmachine_set_state(STOPMACHINE_PREPARE);

	/* Make them disable irqs. */
	local_irq_disable();
	hard_irq_disable();
	stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);

	return 0;
}

static void restart_machine(void)
{
	stopmachine_set_state(STOPMACHINE_EXIT);
	local_irq_enable();
	preempt_enable_no_resched();
}
static enum stopmachine_state state;

struct stop_machine_data {
	int (*fn)(void *);
	void *data;
	struct completion done;
	int fnret;
};

static int do_stop(void *_smdata)
/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
static unsigned int num_threads;
static atomic_t thread_ack;
static struct completion finished;
static DEFINE_MUTEX(lock);

static void set_state(enum stopmachine_state newstate)
{
	struct stop_machine_data *smdata = _smdata;
	int ret;

	ret = stop_machine();
	if (ret == 0) {
		ret = smdata->fn(smdata->data);
		restart_machine();
	}

	/* We're done: you can kthread_stop us now */
	complete(&smdata->done);

	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return ret;
	/* Reset ack counter. */
	atomic_set(&thread_ack, num_threads);
	smp_wmb();
	state = newstate;
}

struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
				       unsigned int cpu)
/* Last one to ack a state moves to the next state. */
static void ack_state(void)
{
	static DEFINE_MUTEX(stopmachine_mutex);
	struct stop_machine_data smdata;
	struct task_struct *p;
	if (atomic_dec_and_test(&thread_ack)) {
		/* If we're the last one to ack the EXIT, we're finished. */
		if (state == STOPMACHINE_EXIT)
			complete(&finished);
		else
			set_state(state + 1);
	}
}

	smdata.fn = fn;
	smdata.data = data;
	init_completion(&smdata.done);
/* This is the actual thread which stops the CPU.  It exits by itself rather
 * than waiting for kthread_stop(), because it's easier for hotplug CPU. */
static int stop_cpu(struct stop_machine_data *smdata)
{
	enum stopmachine_state curstate = STOPMACHINE_NONE;
	int uninitialized_var(ret);

	mutex_lock(&stopmachine_mutex);
	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read stopmachine_state. */
		cpu_relax();
		if (state != curstate) {
			curstate = state;
			switch (curstate) {
			case STOPMACHINE_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case STOPMACHINE_RUN:
				/* |= allows error detection if functions on
				 * multiple CPUs. */
				smdata->fnret |= smdata->fn(smdata->data);
				break;
			default:
				break;
			}
			ack_state();
		}
	} while (curstate != STOPMACHINE_EXIT);

	/* If they don't care which CPU fn runs on, bind to any online one. */
	if (cpu == NR_CPUS)
		cpu = raw_smp_processor_id();
	local_irq_enable();
	do_exit(0);
}

	p = kthread_create(do_stop, &smdata, "kstopmachine");
	if (!IS_ERR(p)) {
/* Callback for CPUs which aren't supposed to do anything. */
static int chill(void *unused)
{
	return 0;
}

int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
{
	int i, err;
	struct stop_machine_data active, idle;
	struct task_struct **threads;

	active.fn = fn;
	active.data = data;
	active.fnret = 0;
	idle.fn = chill;
	idle.data = NULL;

	/* This could be too big for stack on large machines. */
	threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL);
	if (!threads)
		return -ENOMEM;

	/* Set up initial state. */
	mutex_lock(&lock);
	init_completion(&finished);
	num_threads = num_online_cpus();
	set_state(STOPMACHINE_PREPARE);

	for_each_online_cpu(i) {
		struct stop_machine_data *smdata = &idle;
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		/* One high-prio thread per cpu.  We'll do this one. */
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_bind(p, cpu);
		wake_up_process(p);
		wait_for_completion(&smdata.done);
		if (!cpus) {
			if (i == first_cpu(cpu_online_map))
				smdata = &active;
		} else {
			if (cpu_isset(i, *cpus))
				smdata = &active;
		}

		threads[i] = kthread_create((void *)stop_cpu, smdata, "kstop%u",
					    i);
		if (IS_ERR(threads[i])) {
			err = PTR_ERR(threads[i]);
			threads[i] = NULL;
			goto kill_threads;
		}

		/* Place it onto correct cpu. */
		kthread_bind(threads[i], i);

		/* Make it highest prio. */
		if (sched_setscheduler_nocheck(threads[i], SCHED_FIFO, &param))
			BUG();
	}
	mutex_unlock(&stopmachine_mutex);
	return p;

	/* We've created all the threads.  Wake them all: hold this CPU so one
	 * doesn't hit this CPU until we're ready. */
	get_cpu();
	for_each_online_cpu(i)
		wake_up_process(threads[i]);

	/* This will release the thread on our CPU. */
	put_cpu();
	wait_for_completion(&finished);
	mutex_unlock(&lock);

	kfree(threads);

	return active.fnret;

kill_threads:
	for_each_online_cpu(i)
		if (threads[i])
			kthread_stop(threads[i]);
	mutex_unlock(&lock);

	kfree(threads);
	return err;
}

int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
{
	struct task_struct *p;
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	p = __stop_machine_run(fn, data, cpu);
	if (!IS_ERR(p))
		ret = kthread_stop(p);
	else
		ret = PTR_ERR(p);
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();

	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine_run);
EXPORT_SYMBOL_GPL(stop_machine);

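The user-visible change in the stop_machine rewrite is the interface: stop_machine_run(fn, data, cpu) becomes stop_machine(fn, data, cpus), where cpus selects which CPUs run fn (NULL meaning "any one online CPU", while every other online CPU spins with interrupts disabled), and fn's return value comes back directly instead of via kthread_stop() on a returned task. A hedged caller sketch (patch_flag and set_flag_atomically are made up for illustration):

	static int patch_flag(void *arg)
	{
		/* Every other online CPU is parked with IRQs disabled,
		 * so nothing can observe the flag mid-update. */
		*(int *)arg = 1;
		return 0;
	}

	int set_flag_atomically(int *flag)
	{
		/* NULL cpumask: any one online CPU may run patch_flag(). */
		return stop_machine(patch_flag, flag, NULL);
	}
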
31
kernel/sys.c

@@ -301,26 +301,6 @@ void kernel_restart(char *cmd)
}
EXPORT_SYMBOL_GPL(kernel_restart);

/**
 * kernel_kexec - reboot the system
 *
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
static void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
	struct kimage *image;
	image = xchg(&kexec_image, NULL);
	if (!image)
		return;
	kernel_restart_prepare(NULL);
	printk(KERN_EMERG "Starting new kernel\n");
	machine_shutdown();
	machine_kexec(image);
#endif
}

static void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,

@@ -425,10 +405,15 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
		kernel_restart(buffer);
		break;

#ifdef CONFIG_KEXEC
	case LINUX_REBOOT_CMD_KEXEC:
		kernel_kexec();
		unlock_kernel();
		return -EINVAL;
	{
		int ret;
		ret = kernel_kexec();
		unlock_kernel();
		return ret;
	}
#endif

#ifdef CONFIG_HIBERNATION
	case LINUX_REBOOT_CMD_SW_SUSPEND:

168
kernel/sysctl.c

@@ -160,12 +160,13 @@ static struct ctl_table root_table[];
static struct ctl_table_root sysctl_table_root;
static struct ctl_table_header root_table_header = {
	.ctl_table = root_table,
	.ctl_entry = LIST_HEAD_INIT(sysctl_table_root.header_list),
	.ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),
	.root = &sysctl_table_root,
	.set = &sysctl_table_root.default_set,
};
static struct ctl_table_root sysctl_table_root = {
	.root_list = LIST_HEAD_INIT(sysctl_table_root.root_list),
	.header_list = LIST_HEAD_INIT(root_table_header.ctl_entry),
	.default_set.list = LIST_HEAD_INIT(root_table_header.ctl_entry),
};

static struct ctl_table kern_table[];

@@ -1386,6 +1387,9 @@ static void start_unregistering(struct ctl_table_header *p)
		spin_unlock(&sysctl_lock);
		wait_for_completion(&wait);
		spin_lock(&sysctl_lock);
	} else {
		/* anything non-NULL; we'll never dereference it */
		p->unregistering = ERR_PTR(-EINVAL);
	}
	/*
	 * do not remove from the list until nobody holds it; walking the

@@ -1394,6 +1398,32 @@ static void start_unregistering(struct ctl_table_header *p)
	list_del_init(&p->ctl_entry);
}

void sysctl_head_get(struct ctl_table_header *head)
{
	spin_lock(&sysctl_lock);
	head->count++;
	spin_unlock(&sysctl_lock);
}

void sysctl_head_put(struct ctl_table_header *head)
{
	spin_lock(&sysctl_lock);
	if (!--head->count)
		kfree(head);
	spin_unlock(&sysctl_lock);
}

struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *head)
{
	if (!head)
		BUG();
	spin_lock(&sysctl_lock);
	if (!use_table(head))
		head = ERR_PTR(-ENOENT);
	spin_unlock(&sysctl_lock);
	return head;
}

void sysctl_head_finish(struct ctl_table_header *head)
{
	if (!head)

@@ -1403,14 +1433,20 @@ void sysctl_head_finish(struct ctl_table_header *head)
	spin_unlock(&sysctl_lock);
}

static struct ctl_table_set *
lookup_header_set(struct ctl_table_root *root, struct nsproxy *namespaces)
{
	struct ctl_table_set *set = &root->default_set;
	if (root->lookup)
		set = root->lookup(root, namespaces);
	return set;
}

static struct list_head *
lookup_header_list(struct ctl_table_root *root, struct nsproxy *namespaces)
{
	struct list_head *header_list;
	header_list = &root->header_list;
	if (root->lookup)
		header_list = root->lookup(root, namespaces);
	return header_list;
	struct ctl_table_set *set = lookup_header_set(root, namespaces);
	return &set->list;
}

struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces,

@@ -1480,9 +1516,9 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
	int op = 0, rc;

	if (oldval)
		op |= 004;
		op |= MAY_READ;
	if (newval)
		op |= 002;
		op |= MAY_WRITE;
	if (sysctl_perm(root, table, op))
		return -EPERM;

@@ -1524,7 +1560,7 @@ repeat:
		if (n == table->ctl_name) {
			int error;
			if (table->child) {
				if (sysctl_perm(root, table, 001))
				if (sysctl_perm(root, table, MAY_EXEC))
					return -EPERM;
				name++;
				nlen--;

@@ -1599,7 +1635,7 @@ static int test_perm(int mode, int op)
		mode >>= 6;
	else if (in_egroup_p(0))
		mode >>= 3;
	if ((mode & op & 0007) == op)
	if ((op & ~mode & (MAY_READ|MAY_WRITE|MAY_EXEC)) == 0)
		return 0;
	return -EACCES;
}

@@ -1609,7 +1645,7 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
	int error;
	int mode;

	error = security_sysctl(table, op);
	error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
	if (error)
		return error;

@@ -1644,6 +1680,54 @@ static __init int sysctl_init(void)

core_initcall(sysctl_init);

static struct ctl_table *is_branch_in(struct ctl_table *branch,
				      struct ctl_table *table)
{
	struct ctl_table *p;
	const char *s = branch->procname;

	/* branch should have named subdirectory as its first element */
	if (!s || !branch->child)
		return NULL;

	/* ... and nothing else */
	if (branch[1].procname || branch[1].ctl_name)
		return NULL;

	/* table should contain subdirectory with the same name */
	for (p = table; p->procname || p->ctl_name; p++) {
		if (!p->child)
			continue;
		if (p->procname && strcmp(p->procname, s) == 0)
			return p;
	}
	return NULL;
}

/* see if attaching q to p would be an improvement */
static void try_attach(struct ctl_table_header *p, struct ctl_table_header *q)
{
	struct ctl_table *to = p->ctl_table, *by = q->ctl_table;
	struct ctl_table *next;
	int is_better = 0;
	int not_in_parent = !p->attached_by;

	while ((next = is_branch_in(by, to)) != NULL) {
		if (by == q->attached_by)
			is_better = 1;
		if (to == p->attached_by)
			not_in_parent = 1;
		by = by->child;
		to = next->child;
	}

	if (is_better && not_in_parent) {
		q->attached_by = by;
		q->attached_to = to;
		q->parent = p;
	}
}

/**
 * __register_sysctl_paths - register a sysctl hierarchy
 * @root: List of sysctl headers to register on

@@ -1720,10 +1804,10 @@ struct ctl_table_header *__register_sysctl_paths(
	struct nsproxy *namespaces,
	const struct ctl_path *path, struct ctl_table *table)
{
	struct list_head *header_list;
	struct ctl_table_header *header;
	struct ctl_table *new, **prevp;
	unsigned int n, npath;
	struct ctl_table_set *set;

	/* Count the path components */
	for (npath = 0; path[npath].ctl_name || path[npath].procname; ++npath)

@@ -1765,6 +1849,7 @@ struct ctl_table_header *__register_sysctl_paths(
	header->unregistering = NULL;
	header->root = root;
	sysctl_set_parent(NULL, header->ctl_table);
	header->count = 1;
#ifdef CONFIG_SYSCTL_SYSCALL_CHECK
	if (sysctl_check_table(namespaces, header->ctl_table)) {
		kfree(header);

@@ -1772,8 +1857,20 @@ struct ctl_table_header *__register_sysctl_paths(
	}
#endif
	spin_lock(&sysctl_lock);
	header_list = lookup_header_list(root, namespaces);
	list_add_tail(&header->ctl_entry, header_list);
	header->set = lookup_header_set(root, namespaces);
	header->attached_by = header->ctl_table;
	header->attached_to = root_table;
	header->parent = &root_table_header;
	for (set = header->set; set; set = set->parent) {
		struct ctl_table_header *p;
		list_for_each_entry(p, &set->list, ctl_entry) {
			if (p->unregistering)
				continue;
			try_attach(p, header);
		}
	}
	header->parent->count++;
	list_add_tail(&header->ctl_entry, &header->set->list);
	spin_unlock(&sysctl_lock);

	return header;

@@ -1828,8 +1925,37 @@ void unregister_sysctl_table(struct ctl_table_header * header)

	spin_lock(&sysctl_lock);
	start_unregistering(header);
	if (!--header->parent->count) {
		WARN_ON(1);
		kfree(header->parent);
	}
	if (!--header->count)
		kfree(header);
	spin_unlock(&sysctl_lock);
	kfree(header);
}

int sysctl_is_seen(struct ctl_table_header *p)
{
	struct ctl_table_set *set = p->set;
	int res;
	spin_lock(&sysctl_lock);
	if (p->unregistering)
		res = 0;
	else if (!set->is_seen)
		res = 1;
	else
		res = set->is_seen(set);
	spin_unlock(&sysctl_lock);
	return res;
}

void setup_sysctl_set(struct ctl_table_set *p,
	struct ctl_table_set *parent,
	int (*is_seen)(struct ctl_table_set *))
{
	INIT_LIST_HEAD(&p->list);
	p->parent = parent ? parent : &sysctl_table_root.default_set;
	p->is_seen = is_seen;
}

#else /* !CONFIG_SYSCTL */

@@ -1848,6 +1974,16 @@ void unregister_sysctl_table(struct ctl_table_header * table)
{
}

void setup_sysctl_set(struct ctl_table_set *p,
	struct ctl_table_set *parent,
	int (*is_seen)(struct ctl_table_set *))
{
}

void sysctl_head_put(struct ctl_table_header *head)
{
}

#endif /* CONFIG_SYSCTL */

/*

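The sysctl changes replace the per-root list_head lookup with a ctl_table_set that can chain to a parent set and carry an is_seen() visibility predicate, plus reference counting on headers. A rough sketch of how a namespace-aware user might wire one up, with a hypothetical my_set and predicate (an illustration only, not code from this commit):

	static struct ctl_table_set my_set;

	static int my_set_is_seen(struct ctl_table_set *set)
	{
		/* Return non-zero if the current task should see entries
		 * registered in this set; a real implementation would
		 * compare against the task's namespace. */
		return 1;
	}

	static void __init my_sysctl_setup(void)
	{
		/* Parent NULL chains the set to sysctl_table_root.default_set. */
		setup_sysctl_set(&my_set, NULL, my_set_is_seen);
	}
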
@@ -196,12 +196,10 @@ static int tick_check_new_device(struct clock_event_device *newdev)
	struct tick_device *td;
	int cpu, ret = NOTIFY_OK;
	unsigned long flags;
	cpumask_of_cpu_ptr_declare(cpumask);

	spin_lock_irqsave(&tick_device_lock, flags);

	cpu = smp_processor_id();
	cpumask_of_cpu_ptr_next(cpumask, cpu);
	if (!cpu_isset(cpu, newdev->cpumask))
		goto out_bc;

@@ -209,7 +207,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
	curdev = td->evtdev;

	/* cpu local device ? */
	if (!cpus_equal(newdev->cpumask, *cpumask)) {
	if (!cpus_equal(newdev->cpumask, cpumask_of_cpu(cpu))) {

		/*
		 * If the cpu affinity of the device interrupt can not

@@ -222,7 +220,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
		 * If we have a cpu local device already, do not replace it
		 * by a non cpu local device
		 */
		if (curdev && cpus_equal(curdev->cpumask, *cpumask))
		if (curdev && cpus_equal(curdev->cpumask, cpumask_of_cpu(cpu)))
			goto out_bc;
	}

@@ -254,7 +252,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask);
	tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();

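These hunks (and the trace_sysprof one further down) drop the temporary cpumask_of_cpu_ptr*() helpers: cpumask_of_cpu(cpu) now evaluates to a constant single-CPU mask whose address can be taken directly. The pattern, sketched with a hypothetical helper:

	/* Old: declare a pointer and point it at a copy of the mask.
	 *	cpumask_of_cpu_ptr(mask, cpu);
	 *	set_cpus_allowed_ptr(current, mask);
	 */

	/* New: take the address of the constant single-CPU mask. */
	static void run_on_cpu(int cpu)
	{
		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
	}
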
@@ -587,7 +587,7 @@ static int __ftrace_modify_code(void *data)

static void ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
	stop_machine(__ftrace_modify_code, &command, NULL);
}

void ftrace_disable_daemon(void)

@@ -787,7 +787,7 @@ static int ftrace_update_code(void)
	    !ftrace_enabled || !ftraced_trigger)
		return 0;

	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
	stop_machine(__ftrace_update_code, NULL, NULL);

	return 1;
}

@@ -1564,7 +1564,7 @@ static int __init ftrace_dynamic_init(void)

	addr = (unsigned long)ftrace_record_ip;

	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);
	stop_machine(ftrace_dyn_arch_init, &addr, NULL);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {

@@ -1183,7 +1183,6 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *last_ent = iter->ent;
	int i = (int)*pos;
	void *ent;

@@ -1203,9 +1202,6 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)

	iter->pos = *pos;

	if (last_ent && !ent)
		seq_puts(m, "\n\nvim:ft=help\n");

	return ent;
}

@@ -253,12 +253,14 @@ void start_critical_timings(void)
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING

@@ -337,12 +339,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	stop_critical_timing(a0, a1);
	if (preempt_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	start_critical_timing(a0, a1);
	if (preempt_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

@@ -26,7 +26,8 @@ static struct task_struct *wakeup_task;
static int wakeup_cpu;
static unsigned wakeup_prio = -1;

static DEFINE_SPINLOCK(wakeup_lock);
static raw_spinlock_t wakeup_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static void __wakeup_reset(struct trace_array *tr);

@@ -56,7 +57,8 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
	if (unlikely(disabled != 1))
		goto out;

	spin_lock_irqsave(&wakeup_lock, flags);
	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	if (unlikely(!wakeup_task))
		goto unlock;

@@ -71,7 +73,8 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
	trace_function(tr, data, ip, parent_ip, flags);

 unlock:
	spin_unlock_irqrestore(&wakeup_lock, flags);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);

 out:
	atomic_dec(&data->disabled);

@@ -145,7 +148,8 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
	if (likely(disabled != 1))
		goto out;

	spin_lock_irqsave(&wakeup_lock, flags);
	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))

@@ -174,7 +178,8 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,

out_unlock:
	__wakeup_reset(tr);
	spin_unlock_irqrestore(&wakeup_lock, flags);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&tr->data[cpu]->disabled);
}

@@ -209,8 +214,6 @@ static void __wakeup_reset(struct trace_array *tr)
	struct trace_array_cpu *data;
	int cpu;

	assert_spin_locked(&wakeup_lock);

	for_each_possible_cpu(cpu) {
		data = tr->data[cpu];
		tracing_reset(data);

@@ -229,9 +232,11 @@ static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	spin_lock_irqsave(&wakeup_lock, flags);
	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	spin_unlock_irqrestore(&wakeup_lock, flags);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

static void

@@ -252,7 +257,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
		goto out;

	/* interrupts should be off from try_to_wake_up */
	spin_lock(&wakeup_lock);
	__raw_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)

@@ -274,7 +279,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
			   CALLER_ADDR1, CALLER_ADDR2, flags);

out_locked:
	spin_unlock(&wakeup_lock);
	__raw_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&tr->data[cpu]->disabled);
}

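The wakeup tracer's lock becomes a raw_spinlock_t, presumably so the lock itself is not instrumented and the tracer cannot recurse into its own locking; the cost is that IRQ state must now be managed by hand around __raw_spin_lock(). The conversion pattern, sketched with a stand-in lock name:

	static raw_spinlock_t my_lock =
		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

	static void touch_state(void)
	{
		unsigned long flags;

		/* Was: spin_lock_irqsave(&my_lock, flags); */
		local_irq_save(flags);
		__raw_spin_lock(&my_lock);

		/* ... modify tracer state ... */

		/* Was: spin_unlock_irqrestore(&my_lock, flags); */
		__raw_spin_unlock(&my_lock);
		local_irq_restore(flags);
	}
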
@@ -213,9 +213,7 @@ static void start_stack_timers(void)
	int cpu;

	for_each_online_cpu(cpu) {
		cpumask_of_cpu_ptr(new_mask, cpu);

		set_cpus_allowed_ptr(current, new_mask);
		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
		start_stack_timer(cpu);
	}
	set_cpus_allowed_ptr(current, &saved_mask);

@@ -94,10 +94,10 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
		stats->hiwater_vm = mm->hiwater_vm * PAGE_SIZE / KB;
		mmput(mm);
	}
	stats->read_char = p->rchar;
	stats->write_char = p->wchar;
	stats->read_syscalls = p->syscr;
	stats->write_syscalls = p->syscw;
	stats->read_char = p->ioac.rchar;
	stats->write_char = p->ioac.wchar;
	stats->read_syscalls = p->ioac.syscr;
	stats->write_syscalls = p->ioac.syscw;
#ifdef CONFIG_TASK_IO_ACCOUNTING
	stats->read_bytes = p->ioac.read_bytes;
	stats->write_bytes = p->ioac.write_bytes;

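The tsacct hunk reflects the per-task I/O accounting counters moving from individual task_struct fields into a single embedded structure, p->ioac. A sketch of the shape this implies (the field list and types are an assumption based only on the members used in this hunk):

	struct task_io_accounting {
		u64 rchar;		/* bytes read via read()-style calls   */
		u64 wchar;		/* bytes written via write()-style calls */
		u64 syscr;		/* number of read syscalls  */
		u64 syscw;		/* number of write syscalls */
	#ifdef CONFIG_TASK_IO_ACCOUNTING
		u64 read_bytes;		/* bytes caused to be read from storage   */
		u64 write_bytes;	/* bytes caused to be written to storage  */
	#endif
	};

	/* task_struct would then embed: struct task_io_accounting ioac; */
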