The atomic64 library uses a handful of static spin locks to implement
atomic 64-bit operations on architectures without support for atomic
64-bit instructions.
Unfortunately, the spinlocks are initialized in a pure initcall, and that
is too late for the vfs namespace code, which wants to use atomic64
operations before the initcall has run.
This became a problem as of commit 8823c079ba ("vfs: Add setns support
for the mount namespace").
This leads to BUG messages such as:
  BUG: spinlock bad magic on CPU#0, swapper/0/0
   lock: atomic64_lock+0x240/0x400, .magic: 00000000, .owner: <none>/-1, .owner_cpu: 0
    do_raw_spin_lock+0x158/0x198
    _raw_spin_lock_irqsave+0x4c/0x58
    atomic64_add_return+0x30/0x5c
    alloc_mnt_ns.clone.14+0x44/0xac
    create_mnt_ns+0xc/0x54
    mnt_init+0x120/0x1d4
    vfs_caches_init+0xe0/0x10c
    start_kernel+0x29c/0x300
coming out early in boot when spinlock debugging is enabled.
Fix this by initializing the spinlocks statically at compile time.
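
In sketch form, the problematic pattern being removed looked like this
(reconstructed from the description above, not quoted from the tree):

	/* Sketch of the pre-fix code: lock setup was deferred to a pure
	 * initcall, which runs only after vfs_caches_init() already
	 * needs the locks. */
	static int init_atomic64_lock(void)
	{
		int i;

		for (i = 0; i < NR_LOCKS; ++i)
			raw_spin_lock_init(&atomic64_lock[i].lock);
		return 0;
	}
	pure_initcall(init_atomic64_lock);

The static __RAW_SPIN_LOCK_UNLOCKED() initializer that replaces it can be
seen in the full file below.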
Reported-and-tested-by: Vaibhav Bedia <vaibhav.bedia@ti.com>
Tested-by: Tony Lindgren <tony@atomide.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

lib/atomic64.c (179 lines, 4.2 KiB, C):

/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};
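/*
 * Note: the designated initializer above, using __RAW_SPIN_LOCK_UNLOCKED(),
 * is the fix described in the commit message: the locks are valid at
 * compile time, so the atomic64_*() operations work even before any
 * initcalls have run.
 */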

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
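/*
 * Note on lock_addr(): the variable's address is folded down to an
 * index into the lock array, so a given atomic64_t always hashes to
 * the same lock while unrelated variables tend to spread across
 * different locks.  The "addr & (NR_LOCKS - 1)" mask assumes NR_LOCKS
 * stays a power of two.
 */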

long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);
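/*
 * Every accessor, including the plain read and set above, takes the
 * hash lock with raw_spin_lock_irqsave(), so the emulated 64-bit
 * operations stay atomic with respect to other CPUs and to interrupt
 * handlers on the local CPU.
 */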

void atomic64_add(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_add);

long long atomic64_add_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_add_return);
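/*
 * atomic64_add_return() is the function visible in the backtrace in
 * the commit message: alloc_mnt_ns() reached it via vfs_caches_init()
 * before the old initcall had initialized the locks.
 */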

void atomic64_sub(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter -= a;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_sub);

long long atomic64_sub_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter -= a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_sub_return);

long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
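/*
 * atomic64_dec_if_positive() stores the decremented value only when
 * the result is still non-negative; it returns the old value minus
 * one either way, so a negative return tells the caller that no
 * decrement took place.
 */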

long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
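/*
 * atomic64_cmpxchg() returns the value seen before any store; callers
 * detect success by comparing the return value against the expected
 * old value 'o'.
 */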

long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);

int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	int ret = 0;

	raw_spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
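/*
 * atomic64_add_unless() returns 1 only when it performed the add.
 * Helpers such as atomic64_inc_not_zero() are typically implemented
 * on top of it (add 1 unless the counter is 0).
 */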