Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core kernel fixes from Ingo Molnar:
 "This contains the nohz/atomic cleanup/fix for the fetch_or() ugliness
  you noted during the original nohz pull request, plus there are also
  misc fixes:

   - fix liblockdep build bug
   - fix uapi header build bug
   - print more lockdep hash collision info to help debug recent reports
     of hash collisions
   - update MAINTAINERS email address"

* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  MAINTAINERS: Update my email address
  locking/lockdep: Print chain_key collision information
  uapi/linux/stddef.h: Provide __always_inline to userspace headers
  tools/lib/lockdep: Fix unsupported 'basename -s' in run_tests.sh
  locking/atomic, sched: Unexport fetch_or()
  timers/nohz: Convert tick dependency mask to atomic_t
  locking/atomic: Introduce atomic_fetch_or()
commit 7b367f5dba
Author: Linus Torvalds
Date:   2016-04-03 07:06:53 -05:00

9 changed files with 158 additions and 58 deletions

kernel/locking/lockdep.c

@@ -1999,6 +1999,77 @@ static inline int get_first_held_lock(struct task_struct *curr,
 	return ++i;
 }
 
+/*
+ * Returns the next chain_key iteration
+ */
+static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
+{
+	u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
+
+	printk(" class_idx:%d -> chain_key:%016Lx",
+		class_idx,
+		(unsigned long long)new_chain_key);
+	return new_chain_key;
+}
+
+static void
+print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
+{
+	struct held_lock *hlock;
+	u64 chain_key = 0;
+	int depth = curr->lockdep_depth;
+	int i;
+
+	printk("depth: %u\n", depth + 1);
+	for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
+		hlock = curr->held_locks + i;
+		chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
+
+		print_lock(hlock);
+	}
+
+	print_chain_key_iteration(hlock_next->class_idx, chain_key);
+	print_lock(hlock_next);
+}
+
+static void print_chain_keys_chain(struct lock_chain *chain)
+{
+	int i;
+	u64 chain_key = 0;
+	int class_id;
+
+	printk("depth: %u\n", chain->depth);
+	for (i = 0; i < chain->depth; i++) {
+		class_id = chain_hlocks[chain->base + i];
+		chain_key = print_chain_key_iteration(class_id + 1, chain_key);
+
+		print_lock_name(lock_classes + class_id);
+		printk("\n");
+	}
+}
+
+static void print_collision(struct task_struct *curr,
+			struct held_lock *hlock_next,
+			struct lock_chain *chain)
+{
+	printk("\n");
+	printk("======================\n");
+	printk("[chain_key collision ]\n");
+	print_kernel_ident();
+	printk("----------------------\n");
+	printk("%s/%d: ", current->comm, task_pid_nr(current));
+	printk("Hash chain already cached but the contents don't match!\n");
+
+	printk("Held locks:");
+	print_chain_keys_held_locks(curr, hlock_next);
+
+	printk("Locks in cached chain:");
+	print_chain_keys_chain(chain);
+
+	printk("\nstack backtrace:\n");
+	dump_stack();
+}
+
 /*
  * Checks whether the chain and the current held locks are consistent
  * in depth and also in content. If they are not it most likely means
@@ -2014,14 +2085,18 @@ static int check_no_collision(struct task_struct *curr,
 	i = get_first_held_lock(curr, hlock);
 
-	if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1)))
+	if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
+		print_collision(curr, hlock, chain);
 		return 0;
+	}
 
 	for (j = 0; j < chain->depth - 1; j++, i++) {
 		id = curr->held_locks[i].class_idx - 1;
 
-		if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id))
+		if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
+			print_collision(curr, hlock, chain);
 			return 0;
+		}
 	}
 #endif
 	return 1;
 }
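The idea behind chain_key is easiest to see outside the kernel. The standalone C sketch below folds a sequence of lock class indices into a running 64-bit key; the mixing step is a hypothetical stand-in (the kernel's actual iterate_chain_key() differs), and chains_match() mirrors the element-by-element comparison check_no_collision() performs against a cached chain:

/* Standalone sketch of the chain_key idea. The mixing step below is a
 * hypothetical stand-in, not the kernel's actual iterate_chain_key(). */
#include <stdint.h>
#include <stdio.h>

static uint64_t iterate_chain_key(uint64_t key, int class_idx)
{
	/* Fold one class index into the running key. */
	key ^= (uint64_t)class_idx;
	key *= 0x9e3779b97f4a7c15ULL;	/* 64-bit golden-ratio constant */
	return key ^ (key >> 32);
}

/* Mirrors check_no_collision(): a cached chain is only trusted if its
 * recorded class indices match the currently held locks one by one. */
static int chains_match(const int *cached, const int *held, int depth)
{
	for (int i = 0; i < depth; i++)
		if (cached[i] != held[i])
			return 0;
	return 1;
}

int main(void)
{
	int held[] = { 3, 7, 12 };
	uint64_t chain_key = 0;

	for (int i = 0; i < 3; i++) {
		chain_key = iterate_chain_key(chain_key, held[i]);
		printf(" class_idx:%d -> chain_key:%016llx\n",
		       held[i], (unsigned long long)chain_key);
	}
	return chains_match(held, held, 3) ? 0 : 1;
}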

kernel/sched/core.c

@@ -321,6 +321,24 @@ static inline void init_hrtick(void)
 }
 #endif	/* CONFIG_SCHED_HRTICK */
 
+/*
+ * cmpxchg based fetch_or, macro so it works for different integer types
+ */
+#define fetch_or(ptr, mask)						\
+({									\
+	typeof(ptr) _ptr = (ptr);					\
+	typeof(mask) _mask = (mask);					\
+	typeof(*_ptr) _old, _val = *_ptr;				\
+									\
+	for (;;) {							\
+		_old = cmpxchg(_ptr, _val, _val | _mask);		\
+		if (_old == _val)					\
+			break;						\
+		_val = _old;						\
+	}								\
+	_old;								\
+})
+
 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
 /*
  * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
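The macro evaluates to the value the word held before the OR, which is what lets callers detect the 0 -> nonzero transition. A userspace sketch of the same compare-and-swap loop, using GCC's __atomic builtins in place of the kernel's arch-specific cmpxchg(), would look roughly like this:

/* Userspace sketch of the fetch_or() loop; GCC's __atomic builtins stand
 * in for the kernel's cmpxchg(). */
#include <stdio.h>

static unsigned long fetch_or_ulong(unsigned long *ptr, unsigned long mask)
{
	unsigned long val = __atomic_load_n(ptr, __ATOMIC_RELAXED);

	/* On failure, val is refreshed with the current contents and the
	 * compare-and-swap is retried, exactly like the macro's for (;;). */
	while (!__atomic_compare_exchange_n(ptr, &val, val | mask, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
		;
	return val;	/* the value *before* the OR */
}

int main(void)
{
	unsigned long word = 0;

	printf("prev=%#lx\n", fetch_or_ulong(&word, 1UL << 3));	/* prev=0   */
	printf("prev=%#lx\n", fetch_or_ulong(&word, 1UL << 5));	/* prev=0x8 */
	return 0;
}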

kernel/time/tick-sched.c

@@ -157,52 +157,50 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 cpumask_var_t tick_nohz_full_mask;
 cpumask_var_t housekeeping_mask;
 bool tick_nohz_full_running;
-static unsigned long tick_dep_mask;
+static atomic_t tick_dep_mask;
 
-static void trace_tick_dependency(unsigned long dep)
+static bool check_tick_dependency(atomic_t *dep)
 {
+	int val = atomic_read(dep);
+
-	if (dep & TICK_DEP_MASK_POSIX_TIMER) {
+	if (val & TICK_DEP_MASK_POSIX_TIMER) {
 		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
-		return;
+		return true;
 	}
 
-	if (dep & TICK_DEP_MASK_PERF_EVENTS) {
+	if (val & TICK_DEP_MASK_PERF_EVENTS) {
 		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
-		return;
+		return true;
 	}
 
-	if (dep & TICK_DEP_MASK_SCHED) {
+	if (val & TICK_DEP_MASK_SCHED) {
 		trace_tick_stop(0, TICK_DEP_MASK_SCHED);
-		return;
+		return true;
 	}
 
-	if (dep & TICK_DEP_MASK_CLOCK_UNSTABLE)
+	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
 		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
+		return true;
+	}
+
+	return false;
 }
 
 static bool can_stop_full_tick(struct tick_sched *ts)
 {
 	WARN_ON_ONCE(!irqs_disabled());
 
-	if (tick_dep_mask) {
-		trace_tick_dependency(tick_dep_mask);
+	if (check_tick_dependency(&tick_dep_mask))
 		return false;
-	}
 
-	if (ts->tick_dep_mask) {
-		trace_tick_dependency(ts->tick_dep_mask);
+	if (check_tick_dependency(&ts->tick_dep_mask))
 		return false;
-	}
 
-	if (current->tick_dep_mask) {
-		trace_tick_dependency(current->tick_dep_mask);
+	if (check_tick_dependency(&current->tick_dep_mask))
 		return false;
-	}
 
-	if (current->signal->tick_dep_mask) {
-		trace_tick_dependency(current->signal->tick_dep_mask);
+	if (check_tick_dependency(&current->signal->tick_dep_mask))
 		return false;
-	}
 
 	return true;
 }
@@ -259,12 +257,12 @@ static void tick_nohz_full_kick_all(void)
 	preempt_enable();
 }
 
-static void tick_nohz_dep_set_all(unsigned long *dep,
+static void tick_nohz_dep_set_all(atomic_t *dep,
 				  enum tick_dep_bits bit)
 {
-	unsigned long prev;
+	int prev;
 
-	prev = fetch_or(dep, BIT_MASK(bit));
+	prev = atomic_fetch_or(dep, BIT(bit));
 	if (!prev)
 		tick_nohz_full_kick_all();
 }
@@ -280,7 +278,7 @@ void tick_nohz_dep_set(enum tick_dep_bits bit)
 
 void tick_nohz_dep_clear(enum tick_dep_bits bit)
 {
-	clear_bit(bit, &tick_dep_mask);
+	atomic_andnot(BIT(bit), &tick_dep_mask);
 }
 
 /*
@@ -289,12 +287,12 @@ void tick_nohz_dep_clear(enum tick_dep_bits bit)
  */
 void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
 {
-	unsigned long prev;
+	int prev;
 	struct tick_sched *ts;
 
 	ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 
-	prev = fetch_or(&ts->tick_dep_mask, BIT_MASK(bit));
+	prev = atomic_fetch_or(&ts->tick_dep_mask, BIT(bit));
 	if (!prev) {
 		preempt_disable();
 		/* Perf needs local kick that is NMI safe */
@@ -313,7 +311,7 @@ void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
 {
 	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 
-	clear_bit(bit, &ts->tick_dep_mask);
+	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
 }
 
 /*
@@ -331,7 +329,7 @@ void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
 
 void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
 {
-	clear_bit(bit, &tsk->tick_dep_mask);
+	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
 }
 
 /*
@@ -345,7 +343,7 @@ void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 
 void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 {
-	clear_bit(bit, &sig->tick_dep_mask);
+	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
 }
 
 /*
@@ -366,7 +364,8 @@ void __tick_nohz_task_switch(void)
 	ts = this_cpu_ptr(&tick_cpu_sched);
 
 	if (ts->tick_stopped) {
-		if (current->tick_dep_mask || current->signal->tick_dep_mask)
+		if (atomic_read(&current->tick_dep_mask) ||
+		    atomic_read(&current->signal->tick_dep_mask))
 			tick_nohz_full_kick();
 	}
 out:
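The whole conversion boils down to three operations on the mask: an atomic fetch-or on set (whose return value reveals the 0 -> nonzero transition that requires a kick), an atomic and-not on clear, and a plain atomic read. A minimal sketch of the same pattern, with C11 atomics standing in for the kernel's atomic_t API:

/* C11-atomics sketch of the tick dependency mask pattern; atomic_int and
 * <stdatomic.h> stand in for the kernel's atomic_t operations. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int dep_mask;

static void dep_set(int bit)
{
	/* atomic_fetch_or() returns the old mask: only the first setter
	 * (0 -> nonzero) needs to kick CPUs so they restart the tick. */
	int prev = atomic_fetch_or(&dep_mask, 1 << bit);
	if (!prev)
		printf("kick: first dependency set, restart the tick\n");
}

static void dep_clear(int bit)
{
	atomic_fetch_and(&dep_mask, ~(1 << bit));	/* i.e. andnot */
}

int main(void)
{
	dep_set(1);			/* prev == 0: kicks   */
	dep_set(2);			/* prev != 0: no kick */
	dep_clear(1);
	dep_clear(2);
	printf("mask=%d\n", atomic_load(&dep_mask));	/* mask=0 */
	return 0;
}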

kernel/time/tick-sched.h

@@ -60,7 +60,7 @@ struct tick_sched {
 	u64			next_timer;
 	ktime_t			idle_expires;
 	int			do_timer_last;
-	unsigned long		tick_dep_mask;
+	atomic_t		tick_dep_mask;
 };
 
 extern struct tick_sched *tick_get_tick_sched(int cpu);