Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/cadence/macb.c

Overlapping changes in macb driver, mostly fixes and cleanups
in 'net' overlapping with the integration of at91_ether into
macb in 'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 3cef5c5b0b (David S. Miller, 2015-03-09 23:38:02 -04:00)
307 changed files with 3138 additions and 1730 deletions

kernel/cpuset.c

@@ -548,9 +548,6 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
rcu_read_lock();
cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
if (cp == root_cs)
continue;
/* skip the whole subtree if @cp doesn't have any CPU */
if (cpumask_empty(cp->cpus_allowed)) {
pos_css = css_rightmost_descendant(pos_css);
@@ -873,7 +870,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
* If it becomes empty, inherit the effective mask of the
* parent, which is guaranteed to have some CPUs.
*/
if (cpumask_empty(new_cpus))
if (cgroup_on_dfl(cp->css.cgroup) && cpumask_empty(new_cpus))
cpumask_copy(new_cpus, parent->effective_cpus);
/* Skip the whole subtree if the cpumask remains the same. */
@@ -1129,7 +1126,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
* If it becomes empty, inherit the effective mask of the
* parent, which is guaranteed to have some MEMs.
*/
if (nodes_empty(*new_mems))
if (cgroup_on_dfl(cp->css.cgroup) && nodes_empty(*new_mems))
*new_mems = parent->effective_mems;
/* Skip the whole subtree if the nodemask remains the same. */
@@ -1979,7 +1976,9 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
spin_lock_irq(&callback_lock);
cs->mems_allowed = parent->mems_allowed;
cs->effective_mems = parent->mems_allowed;
cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
spin_unlock_irq(&callback_lock);
out_unlock:
mutex_unlock(&cpuset_mutex);
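
The cpuset hunks above all apply the same rule: on the default hierarchy, an effective mask that would become empty inherits the parent's effective mask instead. A minimal standalone sketch of that fallback, using a plain bitmask in place of cpumask/nodemask (all names here are illustrative, not kernel API):

/* Toy sketch (not kernel code) of the effective-mask fallback shown above:
 * on the default hierarchy an empty intersection inherits the parent's
 * effective mask, so a cpuset never ends up with no CPUs/MEMs. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long mask_t;            /* stand-in for cpumask/nodemask */

static mask_t effective_mask(bool on_dfl, mask_t requested, mask_t parent_eff)
{
        mask_t new = requested & parent_eff;     /* what the child may use */

        /* Same idea as the cgroup_on_dfl() checks in the hunks above. */
        if (on_dfl && new == 0)
                new = parent_eff;                /* inherit, never go empty */
        return new;
}

int main(void)
{
        printf("%#lx\n", effective_mask(true, 0x0, 0xf));   /* -> 0xf */
        printf("%#lx\n", effective_mask(false, 0x0, 0xf));  /* -> 0 */
        return 0;
}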

kernel/irq/manage.c

@@ -1474,8 +1474,13 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
* otherwise we'll have trouble later trying to figure out
* which interrupt is which (messes up the interrupt freeing
* logic etc).
*
* Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
* it cannot be set along with IRQF_NO_SUSPEND.
*/
if ((irqflags & IRQF_SHARED) && !dev_id)
if (((irqflags & IRQF_SHARED) && !dev_id) ||
(!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
return -EINVAL;
desc = irq_to_desc(irq);
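
The new checks in request_threaded_irq() reject three combinations: a shared IRQ without a dev_id, IRQF_COND_SUSPEND without IRQF_SHARED, and IRQF_COND_SUSPEND together with IRQF_NO_SUSPEND. A small userspace sketch of that predicate; the flag constants are defined locally for the sketch and are only illustrative:

#include <assert.h>
#include <stdbool.h>

#define IRQF_SHARED       0x0080
#define IRQF_NO_SUSPEND   0x4000
#define IRQF_COND_SUSPEND 0x40000

static bool irqflags_valid(unsigned long irqflags, const void *dev_id)
{
        if ((irqflags & IRQF_SHARED) && !dev_id)
                return false;       /* shared line needs a dev_id */
        if (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND))
                return false;       /* COND_SUSPEND only makes sense when shared */
        if ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND))
                return false;       /* the two suspend flags are mutually exclusive */
        return true;
}

int main(void)
{
        int token;

        assert(irqflags_valid(IRQF_SHARED | IRQF_COND_SUSPEND, &token));
        assert(!irqflags_valid(IRQF_COND_SUSPEND, &token));
        assert(!irqflags_valid(IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_COND_SUSPEND, &token));
        return 0;
}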

kernel/irq/pm.c

@@ -43,9 +43,12 @@ void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action)
if (action->flags & IRQF_NO_SUSPEND)
desc->no_suspend_depth++;
else if (action->flags & IRQF_COND_SUSPEND)
desc->cond_suspend_depth++;
WARN_ON_ONCE(desc->no_suspend_depth &&
desc->no_suspend_depth != desc->nr_actions);
(desc->no_suspend_depth +
desc->cond_suspend_depth) != desc->nr_actions);
}
/*
@@ -61,6 +64,8 @@ void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action)
if (action->flags & IRQF_NO_SUSPEND)
desc->no_suspend_depth--;
else if (action->flags & IRQF_COND_SUSPEND)
desc->cond_suspend_depth--;
}
static bool suspend_device_irq(struct irq_desc *desc, int irq)
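
With IRQF_COND_SUSPEND accounted for, the consistency warning now fires only when a NO_SUSPEND user shares the line with actions that are neither NO_SUSPEND nor COND_SUSPEND. A toy model of that invariant (struct and function names are invented for the sketch):

#include <assert.h>
#include <stdbool.h>

struct toy_desc {
        unsigned int nr_actions;
        unsigned int no_suspend_depth;
        unsigned int cond_suspend_depth;
};

/* Mirrors the relaxed WARN_ON_ONCE() condition in the hunk above. */
static bool suspend_mix_is_broken(const struct toy_desc *d)
{
        return d->no_suspend_depth &&
               (d->no_suspend_depth + d->cond_suspend_depth) != d->nr_actions;
}

int main(void)
{
        struct toy_desc ok  = { .nr_actions = 3, .no_suspend_depth = 1,
                                .cond_suspend_depth = 2 };
        struct toy_desc bad = { .nr_actions = 3, .no_suspend_depth = 1,
                                .cond_suspend_depth = 1 };

        assert(!suspend_mix_is_broken(&ok));
        assert(suspend_mix_is_broken(&bad));
        return 0;
}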

kernel/livepatch/core.c

@@ -248,11 +248,12 @@ static int klp_find_external_symbol(struct module *pmod, const char *name,
/* first, check if it's an exported symbol */
preempt_disable();
sym = find_symbol(name, NULL, NULL, true, true);
preempt_enable();
if (sym) {
*addr = sym->value;
preempt_enable();
return 0;
}
preempt_enable();
/* otherwise check if it's in another .o within the patch module */
return klp_find_object_symbol(pmod->name, name, addr);
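
The livepatch fix keeps preemption disabled until the looked-up symbol's value has been copied out, since the owning module could otherwise disappear in the window. A standalone sketch of the pattern, with protect()/unprotect() standing in for preempt_disable()/preempt_enable() and a toy symbol table in place of find_symbol():

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct sym { const char *name; unsigned long value; };

static struct sym table[] = { { "foo", 0x1000 }, { "bar", 0x2000 } };

static void protect(void)   { /* stand-in for preempt_disable() */ }
static void unprotect(void) { /* stand-in for preempt_enable()  */ }

static struct sym *lookup(const char *name)
{
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (!strcmp(table[i].name, name))
                        return &table[i];
        return NULL;
}

static int find_addr(const char *name, unsigned long *addr)
{
        struct sym *s;
        int ret = -1;

        protect();
        s = lookup(name);
        if (s) {
                *addr = s->value;   /* read while the object is guaranteed alive */
                ret = 0;
        }
        unprotect();                /* only after this may the entry go away */
        return ret;
}

int main(void)
{
        unsigned long a;

        if (!find_addr("bar", &a))
                printf("%#lx\n", a);
        return 0;
}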

kernel/module.c

@@ -2313,11 +2313,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
mod->core_size += strtab_size;
mod->core_size = debug_align(mod->core_size);
/* Put string table section at end of init part of module. */
strsect->sh_flags |= SHF_ALLOC;
strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
info->index.str) | INIT_OFFSET_MASK;
mod->init_size = debug_align(mod->init_size);
pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
}
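
layout_symtab() now rounds both the core and init sizes up again after appending the string tables, matching the alignment debug_align() applies elsewhere. A small sketch of the round-up arithmetic only; ALIGN_UP mirrors the kernel's ALIGN() and 4096 is an assumed page-size boundary for illustration:

#include <assert.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
        unsigned long core_size = 0x12345;          /* size after adding strtab */

        core_size = ALIGN_UP(core_size, 4096);      /* debug_align() analogue */
        assert(core_size == 0x13000);
        assert(ALIGN_UP(0x1000, 4096) == 0x1000);   /* already aligned: unchanged */
        return 0;
}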

kernel/printk/console_cmdline.h

@@ -3,7 +3,7 @@
struct console_cmdline
{
char name[8]; /* Name of the driver */
char name[16]; /* Name of the driver */
int index; /* Minor dev. to use */
char *options; /* Options for the driver */
#ifdef CONFIG_A11Y_BRAILLE_CONSOLE

kernel/printk/printk.c

@@ -2464,6 +2464,7 @@ void register_console(struct console *newcon)
for (i = 0, c = console_cmdline;
i < MAX_CMDLINECONSOLES && c->name[0];
i++, c++) {
BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
if (strcmp(c->name, newcon->name) != 0)
continue;
if (newcon->index >= 0 &&
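
Widening console_cmdline.name to 16 bytes matches struct console, and the BUILD_BUG_ON() turns any future size mismatch into a compile error instead of a silently truncated strcmp(). A standalone sketch using C11 _Static_assert in place of BUILD_BUG_ON(), with toy structs rather than the kernel ones:

#include <stdio.h>
#include <string.h>

struct console_cmdline_sketch { char name[16]; int index; };
struct console_sketch         { char name[16]; int index; };

/* Fails to compile if the two name buffers ever drift apart in size again. */
_Static_assert(sizeof(((struct console_cmdline_sketch *)0)->name) ==
               sizeof(((struct console_sketch *)0)->name),
               "console name buffers must stay the same size");

int main(void)
{
        struct console_cmdline_sketch c = { .name = "ttymxc", .index = 0 };
        struct console_sketch newcon    = { .name = "ttymxc", .index = 0 };

        printf("%s\n", strcmp(c.name, newcon.name) ? "no match" : "match");
        return 0;
}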

kernel/sched/idle.c

@@ -82,6 +82,7 @@ static void cpuidle_idle_call(void)
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
int next_state, entered_state;
unsigned int broadcast;
bool reflect;
/*
* Check if the idle task must be rescheduled. If it is the
@@ -105,6 +106,9 @@ static void cpuidle_idle_call(void)
*/
rcu_idle_enter();
if (cpuidle_not_available(drv, dev))
goto use_default;
/*
* Suspend-to-idle ("freeze") is a system state in which all user space
* has been frozen, all I/O devices have been suspended and the only
@@ -115,30 +119,24 @@ static void cpuidle_idle_call(void)
* until a proper wakeup interrupt happens.
*/
if (idle_should_freeze()) {
cpuidle_enter_freeze();
local_irq_enable();
goto exit_idle;
}
/*
* Ask the cpuidle framework to choose a convenient idle state.
* Fall back to the default arch idle method on errors.
*/
next_state = cpuidle_select(drv, dev);
if (next_state < 0) {
use_default:
/*
* We can't use the cpuidle framework, let's use the default
* idle routine.
*/
if (current_clr_polling_and_test())
entered_state = cpuidle_enter_freeze(drv, dev);
if (entered_state >= 0) {
local_irq_enable();
else
arch_cpu_idle();
goto exit_idle;
}
goto exit_idle;
reflect = false;
next_state = cpuidle_find_deepest_state(drv, dev);
} else {
reflect = true;
/*
* Ask the cpuidle framework to choose a convenient idle state.
*/
next_state = cpuidle_select(drv, dev);
}
/* Fall back to the default arch idle method on errors. */
if (next_state < 0)
goto use_default;
/*
* The idle task must be scheduled, it is pointless to
@@ -183,7 +181,8 @@ use_default:
/*
* Give the governor an opportunity to reflect on the outcome
*/
cpuidle_reflect(dev, entered_state);
if (reflect)
cpuidle_reflect(dev, entered_state);
exit_idle:
__current_set_polling();
@@ -196,6 +195,19 @@ exit_idle:
rcu_idle_exit();
start_critical_timings();
return;
use_default:
/*
* We can't use the cpuidle framework, let's use the default
* idle routine.
*/
if (current_clr_polling_and_test())
local_irq_enable();
else
arch_cpu_idle();
goto exit_idle;
}
/*
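
After this rework the governor's reflect callback runs only for states the governor itself selected; the suspend-to-idle path picks the deepest state directly and skips reflection. A simplified userspace sketch of that control flow (it omits the cpuidle_enter_freeze() fast path, and every *_sim function is a stand-in, not the cpuidle API):

#include <stdbool.h>
#include <stdio.h>

static bool idle_should_freeze_sim(void) { return false; }
static int  select_state_sim(void)       { return 2; }  /* governor's choice */
static int  deepest_state_sim(void)      { return 3; }  /* freeze path choice */
static int  enter_state_sim(int state)   { return state; }
static void reflect_sim(int entered)     { printf("reflect %d\n", entered); }

static void idle_call_sim(void)
{
        bool reflect;
        int next_state, entered_state;

        if (idle_should_freeze_sim()) {
                reflect = false;                /* state not chosen by the governor */
                next_state = deepest_state_sim();
        } else {
                reflect = true;
                next_state = select_state_sim();
        }
        if (next_state < 0)
                return;                         /* would fall back to default idle */

        entered_state = enter_state_sim(next_state);
        if (reflect)
                reflect_sim(entered_state);     /* governor feedback only here */
}

int main(void)
{
        idle_call_sim();
        return 0;
}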

kernel/trace/ftrace.c

@@ -1059,6 +1059,12 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
static struct pid * const ftrace_swapper_pid = &init_struct_pid;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_graph_active;
#else
# define ftrace_graph_active 0
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
static struct ftrace_ops *removed_ops;
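
Defining ftrace_graph_active as a literal 0 when the graph tracer is compiled out lets later code test it without #ifdef blocks, and the compiler discards the dead branches. A minimal sketch of that stub pattern; the macro and flag values here are invented for illustration:

#include <stdio.h>

/* #define HAVE_GRAPH_TRACER 1 */      /* flip to emulate CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef HAVE_GRAPH_TRACER
static int graph_active;
#else
# define graph_active 0                 /* same trick as ftrace_graph_active above */
#endif

static unsigned int build_command(void)
{
        unsigned int command = 0x1;     /* stand-in for FTRACE_UPDATE_CALLS */

        if (graph_active)               /* no #ifdef needed at the call site */
                command |= 0x8;         /* stand-in for FTRACE_START_FUNC_RET */
        return command;
}

int main(void)
{
        printf("%#x\n", build_command());
        return 0;
}
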
@@ -2041,8 +2047,12 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
if (!ftrace_rec_count(rec))
rec->flags = 0;
else
/* Just disable the record (keep REGS state) */
rec->flags &= ~FTRACE_FL_ENABLED;
/*
* Just disable the record, but keep the ops TRAMP
* and REGS states. The _EN flags must be disabled though.
*/
rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
FTRACE_FL_REGS_EN);
}
return FTRACE_UPDATE_MAKE_NOP;
@@ -2688,24 +2698,36 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
static void ftrace_startup_sysctl(void)
{
int command;
if (unlikely(ftrace_disabled))
return;
/* Force update next time */
saved_ftrace_func = NULL;
/* ftrace_start_up is true if we want ftrace running */
if (ftrace_start_up)
ftrace_run_update_code(FTRACE_UPDATE_CALLS);
if (ftrace_start_up) {
command = FTRACE_UPDATE_CALLS;
if (ftrace_graph_active)
command |= FTRACE_START_FUNC_RET;
ftrace_startup_enable(command);
}
}
static void ftrace_shutdown_sysctl(void)
{
int command;
if (unlikely(ftrace_disabled))
return;
/* ftrace_start_up is true if ftrace is running */
if (ftrace_start_up)
ftrace_run_update_code(FTRACE_DISABLE_CALLS);
if (ftrace_start_up) {
command = FTRACE_DISABLE_CALLS;
if (ftrace_graph_active)
command |= FTRACE_STOP_FUNC_RET;
ftrace_run_update_code(command);
}
}
static cycle_t ftrace_update_time;
@@ -5558,12 +5580,12 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
if (ftrace_enabled) {
ftrace_startup_sysctl();
/* we are starting ftrace again */
if (ftrace_ops_list != &ftrace_list_end)
update_ftrace_function();
ftrace_startup_sysctl();
} else {
/* stopping ftrace calls (just send to ftrace_stub) */
ftrace_trace_function = ftrace_stub;
@@ -5590,8 +5612,6 @@ static struct ftrace_ops graph_ops = {
ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};
static int ftrace_graph_active;
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
return 0;
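
The sysctl re-enable path now installs the current ftrace function before re-enabling the call sites, and the startup/shutdown helpers fold the graph-tracer bit into the command when needed. A toy sketch of the ordering rule only (handler first on the way up, stub first on the way down); none of this is the ftrace API, just stand-in functions:

#include <stdio.h>

static void stub(void)         { }
static void real_handler(void) { puts("traced"); }

static void (*handler)(void) = stub;
static int calls_enabled;

static void call_site(void)
{
        if (calls_enabled)          /* stand-in for a patched call site */
                handler();
}

static void enable_tracing(void)
{
        handler = real_handler;     /* 1. install the handler first ... */
        calls_enabled = 1;          /* 2. ... then turn the calls back on */
}

static void disable_tracing(void)
{
        handler = stub;             /* reverse order on the way down */
        calls_enabled = 0;
}

int main(void)
{
        enable_tracing();
        call_site();
        disable_tracing();
        call_site();                /* nothing traced anymore */
        return 0;
}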

kernel/workqueue.c

@@ -2728,19 +2728,57 @@ bool flush_work(struct work_struct *work)
}
EXPORT_SYMBOL_GPL(flush_work);
struct cwt_wait {
wait_queue_t wait;
struct work_struct *work;
};
static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
if (cwait->work != key)
return 0;
return autoremove_wake_function(wait, mode, sync, key);
}
static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
{
static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
unsigned long flags;
int ret;
do {
ret = try_to_grab_pending(work, is_dwork, &flags);
/*
* If someone else is canceling, wait for the same event it
* would be waiting for before retrying.
* If someone else is already canceling, wait for it to
* finish. flush_work() doesn't work for PREEMPT_NONE
* because we may get scheduled between @work's completion
* and the other canceling task resuming and clearing
* CANCELING - flush_work() will return false immediately
* as @work is no longer busy, try_to_grab_pending() will
* return -ENOENT as @work is still being canceled and the
* other canceling task won't be able to clear CANCELING as
* we're hogging the CPU.
*
* Let's wait for completion using a waitqueue. As this
* may lead to the thundering herd problem, use a custom
* wake function which matches @work along with exclusive
* wait and wakeup.
*/
if (unlikely(ret == -ENOENT))
flush_work(work);
if (unlikely(ret == -ENOENT)) {
struct cwt_wait cwait;
init_wait(&cwait.wait);
cwait.wait.func = cwt_wakefn;
cwait.work = work;
prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
TASK_UNINTERRUPTIBLE);
if (work_is_canceling(work))
schedule();
finish_wait(&cancel_waitq, &cwait.wait);
}
} while (unlikely(ret < 0));
/* tell other tasks trying to grab @work to back off */
@@ -2749,6 +2787,16 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
flush_work(work);
clear_work_data(work);
/*
* Paired with prepare_to_wait() above so that either
* waitqueue_active() is visible here or !work_is_canceling() is
* visible there.
*/
smp_mb();
if (waitqueue_active(&cancel_waitq))
__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
return ret;
}
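
The cancel path now sleeps on one shared waitqueue and relies on a custom wake function that only matches waiters registered for the completed work item, so concurrent cancels of unrelated works are not woken needlessly. A single-threaded userspace sketch of that keyed-wakeup idea (toy arrays, not the kernel waitqueue API; the real code additionally uses exclusive wait so at most one matching waiter wakes per event):

#include <stdbool.h>
#include <stdio.h>

#define MAX_WAITERS 4

struct waiter {
        const void *work;   /* like cwt_wait.work */
        bool woken;
};

static struct waiter waitq[MAX_WAITERS];
static int nr_waiters;

static void wait_for(const void *work)
{
        waitq[nr_waiters++] = (struct waiter){ .work = work, .woken = false };
}

/* Counterpart of cwt_wakefn(): ignore waiters registered for other works. */
static void wake_matching(const void *key)
{
        for (int i = 0; i < nr_waiters; i++)
                if (waitq[i].work == key)
                        waitq[i].woken = true;
}

int main(void)
{
        int work_a, work_b;

        wait_for(&work_a);
        wait_for(&work_b);
        wake_matching(&work_a);

        printf("A woken: %d, B woken: %d\n", waitq[0].woken, waitq[1].woken);
        return 0;
}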