Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/rocker/rocker.c

The rocker conflict came from two overlapping changes: one renamed
the ->vport member to ->pport, and the other made the bitmask
expression use '1ULL' instead of plain '1'.
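
For illustration only, here is a minimal sketch of the shape of the
resolution; the struct and field names are hypothetical, not the
actual rocker.c code:

/*
 * Hypothetical sketch -- not the real rocker.c lines.  It only shows
 * that both changes land in the merged code: the member rename
 * (->vport becomes ->pport) and the 64-bit-safe mask ('1ULL' instead
 * of plain '1', which as a 32-bit int would overflow once the shift
 * count reaches 32).
 */
#include <stdint.h>

struct example_port {
	uint32_t pport;		/* was ->vport before the rename */
};

static uint64_t example_port_bit(const struct example_port *p)
{
	return 1ULL << p->pport;
}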

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 71a83a6db6
Author: David S. Miller <davem@davemloft.net>
Date:   2015-03-03 21:16:48 -05:00
1279 changed files with 49656 additions and 20344 deletions


@@ -604,7 +604,7 @@ return_normal:
online_cpus)
cpu_relax();
if (!time_left)
pr_crit("KGDB: Timed out waiting for secondary CPUs.\n");
pr_crit("Timed out waiting for secondary CPUs.\n");
/*
* At this point the primary processor is completely
@@ -696,6 +696,14 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
if (arch_kgdb_ops.enable_nmi)
arch_kgdb_ops.enable_nmi(0);
/*
* Avoid entering the debugger if we were triggered due to an oops
* but panic_timeout indicates the system should automatically
* reboot on panic. We don't want to get stuck waiting for input
* on such systems, especially if it's "just" an oops.
*/
if (signo != SIGTRAP && panic_timeout)
return 1;
memset(ks, 0, sizeof(struct kgdb_state));
ks->cpu = raw_smp_processor_id();
@@ -828,6 +836,15 @@ static int kgdb_panic_event(struct notifier_block *self,
unsigned long val,
void *data)
{
/*
* Avoid entering the debugger if we were triggered due to a panic
* We don't want to get stuck waiting for input from user in such case.
* panic_timeout indicates the system should automatically
* reboot on panic.
*/
if (panic_timeout)
return NOTIFY_DONE;
if (dbg_kdb_mode)
kdb_printf("PANIC: %s\n", (char *)data);
kgdb_breakpoint();


@@ -439,7 +439,7 @@ poll_again:
* substituted for %d, %x or %o in the prompt.
*/
char *kdb_getstr(char *buffer, size_t bufsize, char *prompt)
char *kdb_getstr(char *buffer, size_t bufsize, const char *prompt)
{
if (prompt && kdb_prompt_str != prompt)
strncpy(kdb_prompt_str, prompt, CMD_BUFLEN);
@@ -548,7 +548,7 @@ static int kdb_search_string(char *searched, char *searchfor)
return 0;
}
int vkdb_printf(const char *fmt, va_list ap)
int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
{
int diag;
int linecount;
@@ -680,6 +680,12 @@ int vkdb_printf(const char *fmt, va_list ap)
size_avail = sizeof(kdb_buffer) - len;
goto kdb_print_out;
}
if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH)
/*
* This was an interactive search (using '/' at more
* prompt) and it has completed. Clear the flag.
*/
kdb_grepping_flag = 0;
/*
* at this point the string is a full line and
* should be printed, up to the null.
@@ -691,19 +697,20 @@ kdb_printit:
* Write to all consoles.
*/
retlen = strlen(kdb_buffer);
cp = (char *) printk_skip_level(kdb_buffer);
if (!dbg_kdb_mode && kgdb_connected) {
gdbstub_msg_write(kdb_buffer, retlen);
gdbstub_msg_write(cp, retlen - (cp - kdb_buffer));
} else {
if (dbg_io_ops && !dbg_io_ops->is_console) {
len = retlen;
cp = kdb_buffer;
len = retlen - (cp - kdb_buffer);
cp2 = cp;
while (len--) {
dbg_io_ops->write_char(*cp);
cp++;
dbg_io_ops->write_char(*cp2);
cp2++;
}
}
while (c) {
c->write(c, kdb_buffer, retlen);
c->write(c, cp, retlen - (cp - kdb_buffer));
touch_nmi_watchdog();
c = c->next;
}
@@ -711,7 +718,10 @@ kdb_printit:
if (logging) {
saved_loglevel = console_loglevel;
console_loglevel = CONSOLE_LOGLEVEL_SILENT;
printk(KERN_INFO "%s", kdb_buffer);
if (printk_get_level(kdb_buffer) || src == KDB_MSGSRC_PRINTK)
printk("%s", kdb_buffer);
else
pr_info("%s", kdb_buffer);
}
if (KDB_STATE(PAGER)) {
@@ -794,11 +804,23 @@ kdb_printit:
kdb_nextline = linecount - 1;
kdb_printf("\r");
suspend_grep = 1; /* for this recursion */
} else if (buf1[0] == '/' && !kdb_grepping_flag) {
kdb_printf("\r");
kdb_getstr(kdb_grep_string, KDB_GREP_STRLEN,
kdbgetenv("SEARCHPROMPT") ?: "search> ");
*strchrnul(kdb_grep_string, '\n') = '\0';
kdb_grepping_flag += KDB_GREPPING_FLAG_SEARCH;
suspend_grep = 1; /* for this recursion */
} else if (buf1[0] && buf1[0] != '\n') {
/* user hit something other than enter */
suspend_grep = 1; /* for this recursion */
kdb_printf("\nOnly 'q' or 'Q' are processed at more "
"prompt, input ignored\n");
if (buf1[0] != '/')
kdb_printf(
"\nOnly 'q', 'Q' or '/' are processed at "
"more prompt, input ignored\n");
else
kdb_printf("\n'/' cannot be used during | "
"grep filtering, input ignored\n");
} else if (kdb_grepping_flag) {
/* user hit enter */
suspend_grep = 1; /* for this recursion */
@@ -844,7 +866,7 @@ int kdb_printf(const char *fmt, ...)
int r;
va_start(ap, fmt);
r = vkdb_printf(fmt, ap);
r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap);
va_end(ap);
return r;


@@ -50,8 +50,7 @@
static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE;
module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600);
#define GREP_LEN 256
char kdb_grep_string[GREP_LEN];
char kdb_grep_string[KDB_GREP_STRLEN];
int kdb_grepping_flag;
EXPORT_SYMBOL(kdb_grepping_flag);
int kdb_grep_leading;
@@ -870,7 +869,7 @@ static void parse_grep(const char *str)
len = strlen(cp);
if (!len)
return;
if (len >= GREP_LEN) {
if (len >= KDB_GREP_STRLEN) {
kdb_printf("search string too long\n");
return;
}
@@ -915,13 +914,12 @@ int kdb_parse(const char *cmdstr)
char *cp;
char *cpp, quoted;
kdbtab_t *tp;
int i, escaped, ignore_errors = 0, check_grep;
int i, escaped, ignore_errors = 0, check_grep = 0;
/*
* First tokenize the command string.
*/
cp = (char *)cmdstr;
kdb_grepping_flag = check_grep = 0;
if (KDB_FLAG(CMD_INTERRUPT)) {
/* Previous command was interrupted, newline must not
@@ -1247,7 +1245,6 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
kdb_printf("due to NonMaskable Interrupt @ "
kdb_machreg_fmt "\n",
instruction_pointer(regs));
kdb_dumpregs(regs);
break;
case KDB_REASON_SSTEP:
case KDB_REASON_BREAK:
@@ -1281,6 +1278,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
*/
kdb_nextline = 1;
KDB_STATE_CLEAR(SUPPRESS);
kdb_grepping_flag = 0;
/* ensure the old search does not leak into '/' commands */
kdb_grep_string[0] = '\0';
cmdbuf = cmd_cur;
*cmdbuf = '\0';
@@ -2256,7 +2256,7 @@ static int kdb_cpu(int argc, const char **argv)
/*
* Validate cpunum
*/
if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
if ((cpunum >= CONFIG_NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
return KDB_BADCPUNUM;
dbg_switch_cpu = cpunum;
@@ -2583,7 +2583,7 @@ static int kdb_summary(int argc, const char **argv)
#define K(x) ((x) << (PAGE_SHIFT - 10))
kdb_printf("\nMemTotal: %8lu kB\nMemFree: %8lu kB\n"
"Buffers: %8lu kB\n",
val.totalram, val.freeram, val.bufferram);
K(val.totalram), K(val.freeram), K(val.bufferram));
return 0;
}


@@ -196,7 +196,9 @@ extern int kdb_main_loop(kdb_reason_t, kdb_reason_t,
/* Miscellaneous functions and data areas */
extern int kdb_grepping_flag;
#define KDB_GREPPING_FLAG_SEARCH 0x8000
extern char kdb_grep_string[];
#define KDB_GREP_STRLEN 256
extern int kdb_grep_leading;
extern int kdb_grep_trailing;
extern char *kdb_cmds[];
@@ -209,7 +211,7 @@ extern void kdb_ps1(const struct task_struct *p);
extern void kdb_print_nameval(const char *name, unsigned long val);
extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info);
extern void kdb_meminfo_proc_show(void);
extern char *kdb_getstr(char *, size_t, char *);
extern char *kdb_getstr(char *, size_t, const char *);
extern void kdb_gdb_state_pass(char *buf);
/* Defines for kdb_symbol_print */


@@ -1,33 +1,7 @@
ccflags-y := -DSRCTREE='"$(srctree)"' -DOBJTREE='"$(objtree)"'
# if-lt
# Usage VAR := $(call if-lt, $(a), $(b))
# Returns 1 if (a < b)
if-lt = $(shell [ $(1) -lt $(2) ] && echo 1)
ifeq ($(CONFIG_GCOV_FORMAT_3_4),y)
cc-ver := 0304
else ifeq ($(CONFIG_GCOV_FORMAT_4_7),y)
cc-ver := 0407
else
# Use cc-version if available, otherwise set 0
#
# scripts/Kbuild.include, which contains cc-version function, is not included
# during make clean "make -f scripts/Makefile.clean obj=kernel/gcov"
# Meaning cc-ver is empty causing if-lt test to fail with
# "/bin/sh: line 0: [: -lt: unary operator expected" error mesage.
# This has no affect on the clean phase, but the error message could be
# confusing/annoying. So this dummy workaround sets cc-ver to zero if cc-version
# is not available. We can probably move if-lt to Kbuild.include, so it's also
# not defined during clean or to include Kbuild.include in
# scripts/Makefile.clean. But the following workaround seems least invasive.
cc-ver := $(if $(call cc-version),$(call cc-version),0)
endif
obj-$(CONFIG_GCOV_KERNEL) := base.o fs.o
ifeq ($(call if-lt, $(cc-ver), 0407),1)
obj-$(CONFIG_GCOV_KERNEL) += gcc_3_4.o
else
obj-$(CONFIG_GCOV_KERNEL) += gcc_4_7.o
endif
obj-y := base.o fs.o
obj-$(CONFIG_GCOV_FORMAT_3_4) += gcc_3_4.o
obj-$(CONFIG_GCOV_FORMAT_4_7) += gcc_4_7.o
obj-$(CONFIG_GCOV_FORMAT_AUTODETECT) += $(call cc-ifversion, -lt, 0407, \
gcc_3_4.o, gcc_4_7.o)


@@ -314,12 +314,12 @@ static void notrace klp_ftrace_handler(unsigned long ip,
rcu_read_lock();
func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
stack_node);
rcu_read_unlock();
if (WARN_ON_ONCE(!func))
return;
goto unlock;
klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
rcu_read_unlock();
}
static int klp_disable_func(struct klp_func *func)
@@ -731,7 +731,7 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
func->state = KLP_DISABLED;
return kobject_init_and_add(&func->kobj, &klp_ktype_func,
obj->kobj, func->old_name);
obj->kobj, "%s", func->old_name);
}
/* parts of the initialization that is done only when the object is loaded */
@@ -807,7 +807,7 @@ static int klp_init_patch(struct klp_patch *patch)
patch->state = KLP_DISABLED;
ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
klp_root_kobj, patch->mod->name);
klp_root_kobj, "%s", patch->mod->name);
if (ret)
goto unlock;


@@ -1193,7 +1193,9 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
if (unlikely(ret)) {
remove_waiter(lock, &waiter);
__set_current_state(TASK_RUNNING);
if (rt_mutex_has_waiters(lock))
remove_waiter(lock, &waiter);
rt_mutex_handle_deadlock(ret, chwalk, &waiter);
}


@@ -1811,7 +1811,7 @@ int vprintk_default(const char *fmt, va_list args)
#ifdef CONFIG_KGDB_KDB
if (unlikely(kdb_trap_printk)) {
r = vkdb_printf(fmt, args);
r = vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
return r;
}
#endif


@@ -326,6 +326,7 @@ void rcu_read_unlock_special(struct task_struct *t)
special = t->rcu_read_unlock_special;
if (special.b.need_qs) {
rcu_preempt_qs();
t->rcu_read_unlock_special.b.need_qs = false;
if (!t->rcu_read_unlock_special.s) {
local_irq_restore(flags);
return;


@@ -87,8 +87,7 @@ static inline struct autogroup *autogroup_create(void)
* so we don't have to move tasks around upon policy change,
* or flail around trying to allocate bandwidth on the fly.
* A bandwidth exception in __sched_setscheduler() allows
* the policy change to proceed. Thereafter, task_group()
* returns &root_task_group, so zero bandwidth is required.
* the policy change to proceed.
*/
free_rt_sched_group(tg);
tg->rt_se = root_task_group.rt_se;
@@ -115,9 +114,6 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
if (tg != &root_task_group)
return false;
if (p->sched_class != &fair_sched_class)
return false;
/*
* We can only assume the task group can't go away on us if
* autogroup_move_group() can see us on ->thread_group list.


@@ -274,7 +274,7 @@ bool try_wait_for_completion(struct completion *x)
* first without taking the lock so we can
* return early in the blocking case.
*/
if (!ACCESS_ONCE(x->done))
if (!READ_ONCE(x->done))
return 0;
spin_lock_irqsave(&x->wait.lock, flags);
@@ -297,6 +297,21 @@ EXPORT_SYMBOL(try_wait_for_completion);
*/
bool completion_done(struct completion *x)
{
return !!ACCESS_ONCE(x->done);
if (!READ_ONCE(x->done))
return false;
/*
* If ->done, we need to wait for complete() to release ->wait.lock
* otherwise we can end up freeing the completion before complete()
* is done referencing it.
*
* The RMB pairs with complete()'s RELEASE of ->wait.lock and orders
* the loads of ->done and ->wait.lock such that we cannot observe
* the lock before complete() acquires it while observing the ->done
* after it's acquired the lock.
*/
smp_rmb();
spin_unlock_wait(&x->wait.lock);
return true;
}
EXPORT_SYMBOL(completion_done);


@@ -306,66 +306,6 @@ __read_mostly int scheduler_running;
*/
int sysctl_sched_rt_runtime = 950000;
/*
* __task_rq_lock - lock the rq @p resides on.
*/
static inline struct rq *__task_rq_lock(struct task_struct *p)
__acquires(rq->lock)
{
struct rq *rq;
lockdep_assert_held(&p->pi_lock);
for (;;) {
rq = task_rq(p);
raw_spin_lock(&rq->lock);
if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
return rq;
raw_spin_unlock(&rq->lock);
while (unlikely(task_on_rq_migrating(p)))
cpu_relax();
}
}
/*
* task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
*/
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
__acquires(p->pi_lock)
__acquires(rq->lock)
{
struct rq *rq;
for (;;) {
raw_spin_lock_irqsave(&p->pi_lock, *flags);
rq = task_rq(p);
raw_spin_lock(&rq->lock);
if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
return rq;
raw_spin_unlock(&rq->lock);
raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
while (unlikely(task_on_rq_migrating(p)))
cpu_relax();
}
}
static void __task_rq_unlock(struct rq *rq)
__releases(rq->lock)
{
raw_spin_unlock(&rq->lock);
}
static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
__releases(rq->lock)
__releases(p->pi_lock)
{
raw_spin_unlock(&rq->lock);
raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}
/*
* this_rq_lock - lock this runqueue and disable interrupts.
*/
@@ -2899,7 +2839,7 @@ void __sched schedule_preempt_disabled(void)
preempt_disable();
}
static void preempt_schedule_common(void)
static void __sched notrace preempt_schedule_common(void)
{
do {
__preempt_count_add(PREEMPT_ACTIVE);
@@ -4418,36 +4358,29 @@ EXPORT_SYMBOL_GPL(yield_to);
* This task is about to go to sleep on IO. Increment rq->nr_iowait so
* that process accounting knows that this is a task in IO wait state.
*/
void __sched io_schedule(void)
{
struct rq *rq = raw_rq();
delayacct_blkio_start();
atomic_inc(&rq->nr_iowait);
blk_flush_plug(current);
current->in_iowait = 1;
schedule();
current->in_iowait = 0;
atomic_dec(&rq->nr_iowait);
delayacct_blkio_end();
}
EXPORT_SYMBOL(io_schedule);
long __sched io_schedule_timeout(long timeout)
{
struct rq *rq = raw_rq();
int old_iowait = current->in_iowait;
struct rq *rq;
long ret;
delayacct_blkio_start();
atomic_inc(&rq->nr_iowait);
blk_flush_plug(current);
current->in_iowait = 1;
if (old_iowait)
blk_schedule_flush_plug(current);
else
blk_flush_plug(current);
delayacct_blkio_start();
rq = raw_rq();
atomic_inc(&rq->nr_iowait);
ret = schedule_timeout(timeout);
current->in_iowait = 0;
current->in_iowait = old_iowait;
atomic_dec(&rq->nr_iowait);
delayacct_blkio_end();
return ret;
}
EXPORT_SYMBOL(io_schedule_timeout);
/**
* sys_sched_get_priority_max - return maximum RT priority.
@@ -7642,6 +7575,12 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
{
struct task_struct *g, *p;
/*
* Autogroups do not have RT tasks; see autogroup_create().
*/
if (task_group_is_autogroup(tg))
return 0;
for_each_process_thread(g, p) {
if (rt_task(p) && task_group(p) == tg)
return 1;
@@ -7734,6 +7673,17 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
{
int i, err = 0;
/*
* Disallowing the root group RT runtime is BAD, it would disallow the
* kernel creating (and or operating) RT threads.
*/
if (tg == &root_task_group && rt_runtime == 0)
return -EINVAL;
/* No period doesn't make any sense. */
if (rt_period == 0)
return -EINVAL;
mutex_lock(&rt_constraints_mutex);
read_lock(&tasklist_lock);
err = __rt_schedulable(tg, rt_period, rt_runtime);
@@ -7790,9 +7740,6 @@ static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
rt_period = (u64)rt_period_us * NSEC_PER_USEC;
rt_runtime = tg->rt_bandwidth.rt_runtime;
if (rt_period == 0)
return -EINVAL;
return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}


@@ -511,16 +511,10 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
struct sched_dl_entity,
dl_timer);
struct task_struct *p = dl_task_of(dl_se);
unsigned long flags;
struct rq *rq;
again:
rq = task_rq(p);
raw_spin_lock(&rq->lock);
if (rq != task_rq(p)) {
/* Task was moved, retrying. */
raw_spin_unlock(&rq->lock);
goto again;
}
rq = task_rq_lock(current, &flags);
/*
* We need to take care of several possible races here:
@@ -541,6 +535,26 @@ again:
sched_clock_tick();
update_rq_clock(rq);
/*
* If the throttle happened during sched-out; like:
*
* schedule()
* deactivate_task()
* dequeue_task_dl()
* update_curr_dl()
* start_dl_timer()
* __dequeue_task_dl()
* prev->on_rq = 0;
*
* We can be both throttled and !queued. Replenish the counter
* but do not enqueue -- wait for our wakeup to do that.
*/
if (!task_on_rq_queued(p)) {
replenish_dl_entity(dl_se, dl_se);
goto unlock;
}
enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
if (dl_task(rq->curr))
check_preempt_curr_dl(rq, p, 0);
@@ -555,7 +569,7 @@ again:
push_dl_task(rq);
#endif
unlock:
raw_spin_unlock(&rq->lock);
task_rq_unlock(rq, current, &flags);
return HRTIMER_NORESTART;
}
@@ -898,6 +912,7 @@ static void yield_task_dl(struct rq *rq)
rq->curr->dl.dl_yielded = 1;
p->dl.runtime = 0;
}
update_rq_clock(rq);
update_curr_dl(rq);
}


@@ -1380,6 +1380,82 @@ static inline void sched_avg_update(struct rq *rq) { }
extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
/*
* __task_rq_lock - lock the rq @p resides on.
*/
static inline struct rq *__task_rq_lock(struct task_struct *p)
__acquires(rq->lock)
{
struct rq *rq;
lockdep_assert_held(&p->pi_lock);
for (;;) {
rq = task_rq(p);
raw_spin_lock(&rq->lock);
if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
return rq;
raw_spin_unlock(&rq->lock);
while (unlikely(task_on_rq_migrating(p)))
cpu_relax();
}
}
/*
* task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
*/
static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
__acquires(p->pi_lock)
__acquires(rq->lock)
{
struct rq *rq;
for (;;) {
raw_spin_lock_irqsave(&p->pi_lock, *flags);
rq = task_rq(p);
raw_spin_lock(&rq->lock);
/*
* move_queued_task() task_rq_lock()
*
* ACQUIRE (rq->lock)
* [S] ->on_rq = MIGRATING [L] rq = task_rq()
* WMB (__set_task_cpu()) ACQUIRE (rq->lock);
* [S] ->cpu = new_cpu [L] task_rq()
* [L] ->on_rq
* RELEASE (rq->lock)
*
* If we observe the old cpu in task_rq_lock, the acquire of
* the old rq->lock will fully serialize against the stores.
*
* If we observe the new cpu in task_rq_lock, the acquire will
* pair with the WMB to ensure we must then also see migrating.
*/
if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
return rq;
raw_spin_unlock(&rq->lock);
raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
while (unlikely(task_on_rq_migrating(p)))
cpu_relax();
}
}
static inline void __task_rq_unlock(struct rq *rq)
__releases(rq->lock)
{
raw_spin_unlock(&rq->lock);
}
static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
__releases(rq->lock)
__releases(p->pi_lock)
{
raw_spin_unlock(&rq->lock);
raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}
#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT


@@ -97,6 +97,12 @@
#ifndef MPX_DISABLE_MANAGEMENT
# define MPX_DISABLE_MANAGEMENT(a) (-EINVAL)
#endif
#ifndef GET_FP_MODE
# define GET_FP_MODE(a) (-EINVAL)
#endif
#ifndef SET_FP_MODE
# define SET_FP_MODE(a,b) (-EINVAL)
#endif
/*
* this is where the system-wide overflow UID and GID are defined, for
@@ -1102,6 +1108,7 @@ DECLARE_RWSEM(uts_sem);
/*
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
* And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60.
*/
static int override_release(char __user *release, size_t len)
{
@@ -1121,7 +1128,7 @@ static int override_release(char __user *release, size_t len)
break;
rest++;
}
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
copy = clamp_t(size_t, len, 1, sizeof(buf));
copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
ret = copy_to_user(release, buf, copy + 1);
@@ -2219,6 +2226,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
return -EINVAL;
error = MPX_DISABLE_MANAGEMENT(me);
break;
case PR_SET_FP_MODE:
error = SET_FP_MODE(me, arg2);
break;
case PR_GET_FP_MODE:
error = GET_FP_MODE(me);
break;
default:
error = -EINVAL;
break;


@@ -633,10 +633,14 @@ int ntp_validate_timex(struct timex *txc)
if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
return -EPERM;
if (txc->modes & ADJ_FREQUENCY) {
if (LONG_MIN / PPM_SCALE > txc->freq)
/*
* Check for potential multiplication overflows that can
* only happen on 64-bit systems:
*/
if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
if (LLONG_MIN / PPM_SCALE > txc->freq)
return -EINVAL;
if (LONG_MAX / PPM_SCALE < txc->freq)
if (LLONG_MAX / PPM_SCALE < txc->freq)
return -EINVAL;
}