Linux 3.13-rc7
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJSyJVbAAoJEHm+PkMAQRiGa28H/0m7GpZSpT8mvBthITxzqWCq
JRkSPS4KTurAWlA5CJMJePyCM30DgN90s06bYUen9sTecZUwnL+qSV5OqAmg2r+0
PrfwtXtGZR6/Y12XlZ/3oFxVfUxjmgJyDAS76TIH1IvIum52nvJmLrR+6AyVphIX
DkgBOuapdA7lia+U+ZM1cRkeHxUOKTUEw9v611VgoN3LYZyzyRb6d0rB7JtZN1RV
dnXRi27enaPhwxelsCnORioRjsByMwD40CERxfLHmr5CGhmvCehBjO6bJ+KAdp14
52bfwWcNdbFMzUobcR7qlfS3Hy3AYJci+P6JzeeZ+kWEdv/eh5/1lvNuXtBJRlc=
=iwzJ
-----END PGP SIGNATURE-----

Merge tag 'v3.13-rc7' into x86/efi-kexec to resolve conflicts

Conflicts:
        arch/x86/platform/efi/efi.c
        drivers/firmware/efi/Kconfig

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit ef0b8b9a52
1338 changed files with 14322 additions and 6858 deletions
@@ -77,7 +77,7 @@ static inline void atomic_sub(int i, atomic_t *v)
  */
 static inline int atomic_sub_and_test(int i, atomic_t *v)
 {
-        GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, i, "%0", "e");
+        GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
 }
 
 /**
@@ -141,7 +141,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
  */
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
-        GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, i, "%0", "s");
+        GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
 }
 
 /**
@@ -72,7 +72,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
  */
 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
 {
-        GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, i, "%0", "e");
+        GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
 }
 
 /**
@@ -138,7 +138,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
  */
 static inline int atomic64_add_negative(long i, atomic64_t *v)
 {
-        GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, i, "%0", "s");
+        GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
 }
 
 /**
@@ -205,7 +205,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
  */
 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-        GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, nr, "%0", "c");
+        GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
 }
 
 /**
@@ -251,7 +251,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
  */
 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-        GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, nr, "%0", "c");
+        GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
 }
 
 /**
@@ -304,7 +304,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
  */
 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-        GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, nr, "%0", "c");
+        GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
 }
 
 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
@@ -52,7 +52,7 @@ static inline void local_sub(long i, local_t *l)
  */
 static inline int local_sub_and_test(long i, local_t *l)
 {
-        GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, i, "%0", "e");
+        GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
 }
 
 /**
@@ -92,7 +92,7 @@ static inline int local_inc_and_test(local_t *l)
  */
 static inline int local_add_negative(long i, local_t *l)
 {
-        GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, i, "%0", "s");
+        GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
 }
 
 /**
@@ -452,9 +452,16 @@ static inline int pte_present(pte_t a)
 }
 
 #define pte_accessible pte_accessible
-static inline int pte_accessible(pte_t a)
+static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
 {
-        return pte_flags(a) & _PAGE_PRESENT;
+        if (pte_flags(a) & _PAGE_PRESENT)
+                return true;
+
+        if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
+                        mm_tlb_flush_pending(mm))
+                return true;
+
+        return false;
 }
 
 static inline int pte_hidden(pte_t pte)
@@ -7,6 +7,12 @@
 
 DECLARE_PER_CPU(int, __preempt_count);
 
 /*
+ * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
+ * that a decrement hitting 0 means we can and should reschedule.
+ */
+#define PREEMPT_ENABLED        (0 + PREEMPT_NEED_RESCHED)
+
+/*
  * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
  * that think a non-zero value indicates we cannot preempt.
@@ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val)
         __this_cpu_add_4(__preempt_count, -val);
 }
 
+/*
+ * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
+ * a decrement which hits zero means we have no preempt_count and should
+ * reschedule.
+ */
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
         GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
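The comments added above document the inverted NEED_RESCHED scheme: the PREEMPT_NEED_RESCHED bit is kept set while no reschedule is wanted, so the single decrement in __preempt_count_dec_and_test() reaching zero means "preempt count is zero and a reschedule is pending" in one test. Below is a minimal user-space model of that arithmetic, assuming the usual bit-31 value for PREEMPT_NEED_RESCHED; the helper names are illustrative stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Bit 31 models PREEMPT_NEED_RESCHED; it stays SET while no reschedule is
 * needed, so it only drops out of the zero test once it is cleared. */
#define PREEMPT_NEED_RESCHED  0x80000000u
#define PREEMPT_ENABLED       (0 + PREEMPT_NEED_RESCHED)

/* In the kernel this is the per-CPU __preempt_count. */
static unsigned int preempt_count = PREEMPT_ENABLED;

/* Illustrative stand-ins for the set/clear_preempt_need_resched() helpers. */
static void set_need_resched(void)   { preempt_count &= ~PREEMPT_NEED_RESCHED; }
static void clear_need_resched(void) { preempt_count |=  PREEMPT_NEED_RESCHED; }

/* Models __preempt_count_dec_and_test(): one decrement, one test for zero. */
static bool preempt_count_dec_and_test(void)
{
        return --preempt_count == 0;
}

int main(void)
{
        preempt_count += 1;                   /* preempt_disable() */
        set_need_resched();                   /* someone asked for a reschedule */
        if (preempt_count_dec_and_test())     /* preempt_enable() path */
                puts("count hit zero and a reschedule is wanted");
        clear_need_resched();
        return 0;
}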
@@ -16,8 +16,8 @@ cc_label:                                \
 #define GEN_UNARY_RMWcc(op, var, arg0, cc)                \
         __GEN_RMWcc(op " " arg0, var, cc)
 
-#define GEN_BINARY_RMWcc(op, var, val, arg0, cc)                \
-        __GEN_RMWcc(op " %1, " arg0, var, cc, "er" (val))
+#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)                \
+        __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
 
 #else /* !CC_HAVE_ASM_GOTO */
 
@@ -33,8 +33,8 @@ do {                                \
 #define GEN_UNARY_RMWcc(op, var, arg0, cc)                \
         __GEN_RMWcc(op " " arg0, var, cc)
 
-#define GEN_BINARY_RMWcc(op, var, val, arg0, cc)                \
-        __GEN_RMWcc(op " %2, " arg0, var, cc, "er" (val))
+#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)                \
+        __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
 
 #endif /* CC_HAVE_ASM_GOTO */
 
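The two hunks above change GEN_BINARY_RMWcc() so each call site supplies the operand constraint for the value ("er" at the atomic/local arithmetic sites, "Ir" at the bitops sites, whose operand is a bit number) instead of the macro hard-coding "er". A stripped-down user-space sketch of the asm-goto flavour follows, assuming GCC or a Clang recent enough for asm goto on x86-64; sub_and_test() and the simplified macro are illustrative, not the kernel's exact definitions.

#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified GEN_BINARY_RMWcc(): perform "op val, var", then branch on the
 * condition code "cc".  The value constraint (vcon) -- "er", "Ir", ... --
 * is an argument supplied by the caller rather than being hard-coded.
 */
#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)          \
do {                                                            \
        asm goto (op " %1, " arg0 "; j" cc " %l[cc_label]"      \
                  : : "m" (var), vcon (val)                     \
                  : "memory" : cc_label);                       \
        return false;                                           \
cc_label:                                                       \
        return true;                                            \
} while (0)

/* Caller passes "er" (32-bit immediate or register), as the atomic_*() sites do. */
static inline bool sub_and_test(int i, int *counter)
{
        GEN_BINARY_RMWcc("subl", *counter, "er", i, "%0", "e");
}

int main(void)
{
        int counter = 5;
        printf("%d\n", sub_and_test(5, &counter));      /* 1: counter reached zero */
        printf("%d\n", counter);                        /* 0 */
        return 0;
}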
@@ -71,6 +71,17 @@ DEFINE_IRQ_VECTOR_EVENT(x86_platform_ipi);
  */
 DEFINE_IRQ_VECTOR_EVENT(irq_work);
 
+/*
+ * We must dis-allow sampling irq_work_exit() because perf event sampling
+ * itself can cause irq_work, which would lead to an infinite loop;
+ *
+ *  1) irq_work_exit happens
+ *  2) generates perf sample
+ *  3) generates irq_work
+ *  4) goto 1
+ */
+TRACE_EVENT_PERF_PERM(irq_work_exit, is_sampling_event(p_event) ? -EPERM : 0);
+
 /*
  * call_function - called when entering/exiting a call function interrupt
  * vector handler