Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
Pull sparc fixes from David Miller:
 1) Fix NULL oops in Schizo PCI controller error handler.
 2) Fix race between xchg and other operations on 32-bit sparc, from
    Andreas Larsson.
 3) swab*() helpers need a dummy memory input operand to show data flow
    on 64-bit sparc.
 4) Fix RCU warnings due to missing irq_{enter,exit}() around
    generic_smp_call_function*() calls.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc64: Fix constraints on swab helpers.
  sparc32: Implement xchg and atomic_xchg using ATOMIC_HASH locks
  sparc64: Do irq_{enter,exit}() around generic_smp_call_function*().
  sparc64: Fix crashes in schizo_pcierr_intr_other().
	
	
commit 435e46f5d3

6 changed files with 43 additions and 20 deletions
@@ -22,7 +22,7 @@
 
 int atomic_add_return(int, atomic_t *);
 int atomic_cmpxchg(atomic_t *, int, int);
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+int atomic_xchg(atomic_t *, int);
 int __atomic_add_unless(atomic_t *, int, int);
 void atomic_set(atomic_t *, int);
 
@@ -11,22 +11,14 @@
 #ifndef __ARCH_SPARC_CMPXCHG__
 #define __ARCH_SPARC_CMPXCHG__
 
-static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
-{
-	__asm__ __volatile__("swap [%2], %0"
-			     : "=&r" (val)
-			     : "0" (val), "r" (m)
-			     : "memory");
-	return val;
-}
-
+unsigned long __xchg_u32(volatile u32 *m, u32 new);
 void __xchg_called_with_bad_pointer(void);
 
 static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
 	switch (size) {
 	case 4:
-		return xchg_u32(ptr, x);
+		return __xchg_u32(ptr, x);
 	}
 	__xchg_called_with_bad_pointer();
 	return x;
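These two header hunks (apparently arch/sparc/include/asm/atomic_32.h and arch/sparc/include/asm/cmpxchg_32.h) are the interface half of fix #2. On 32-bit sparc the atomic_*() operations are not hardware atomics; they are emulated under a small hash of spinlocks, while the old inline xchg_u32() went straight to the `swap` instruction and bypassed those locks. A bare swap can land between the locked read and write of an emulated atomic op on another CPU, and one of the two updates is lost, so xchg() and atomic_xchg() become out-of-line functions that take the same lock (the implementations are in the last two hunks of this merge). Below is a rough sketch of the hashed-lock scheme these helpers rely on, with the macro written out here for illustration only; the real definition lives in the sparc32 atomic emulation code and may differ in detail.

#include <linux/spinlock.h>

/* Sketch, not the kernel sources: per-address hashed spinlocks of the
 * kind sparc32 uses to emulate atomics.  Every "atomic" access to a
 * word takes the lock covering its address, so an exchange done this
 * way cannot interleave with another locked read-modify-write. */
#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a) \
	(&__atomic_hash[(((unsigned long)(a)) >> 8) & (ATOMIC_HASH_SIZE - 1)])

static spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... ATOMIC_HASH_SIZE - 1] = __SPIN_LOCK_UNLOCKED(__atomic_hash),
};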
@@ -9,9 +9,9 @@ static inline __u16 __arch_swab16p(const __u16 *addr)
 {
 	__u16 ret;
 
-	__asm__ __volatile__ ("lduha [%1] %2, %0"
+	__asm__ __volatile__ ("lduha [%2] %3, %0"
 			      : "=r" (ret)
-			      : "r" (addr), "i" (ASI_PL));
+			      : "m" (*addr), "r" (addr), "i" (ASI_PL));
 	return ret;
 }
 #define __arch_swab16p __arch_swab16p
@@ -20,9 +20,9 @@ static inline __u32 __arch_swab32p(const __u32 *addr)
 {
 	__u32 ret;
 
-	__asm__ __volatile__ ("lduwa [%1] %2, %0"
+	__asm__ __volatile__ ("lduwa [%2] %3, %0"
 			      : "=r" (ret)
-			      : "r" (addr), "i" (ASI_PL));
+			      : "m" (*addr), "r" (addr), "i" (ASI_PL));
 	return ret;
 }
 #define __arch_swab32p __arch_swab32p
@@ -31,9 +31,9 @@ static inline __u64 __arch_swab64p(const __u64 *addr)
 {
 	__u64 ret;
 
-	__asm__ __volatile__ ("ldxa [%1] %2, %0"
+	__asm__ __volatile__ ("ldxa [%2] %3, %0"
 			      : "=r" (ret)
-			      : "r" (addr), "i" (ASI_PL));
+			      : "m" (*addr), "r" (addr), "i" (ASI_PL));
 	return ret;
 }
 #define __arch_swab64p __arch_swab64p
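Fix #3 in short: each swab helper loads through `addr` with the little-endian ASI, but the old constraints only told GCC about the pointer value ("r" (addr)), not about a read of the pointed-to bytes, so the compiler was free to reorder or drop a plain store to *addr relative to the asm. The added "m" (*addr) operand is never referenced in the template; it exists purely to make that data flow visible, and the register and immediate operands shift from %1/%2 to %2/%3 to make room for it. A hypothetical caller (not from the kernel sources) where the missing dependence would bite:

#include <linux/types.h>
#include <linux/swab.h>

/* Hypothetical example: the store must be visible to the byte-swapped
 * load that follows it.  With only "r" (addr) as an asm input, GCC does
 * not know the asm reads *p and may sink or elide the store; the dummy
 * "m" (*p) operand added above forces the ordering. */
static u16 store_then_swab(u16 *p, u16 raw)
{
	*p = raw;		/* ordinary store to the buffer  */
	return swab16p(p);	/* must observe the store above  */
}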
@@ -581,7 +581,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
 {
 	unsigned long csr_reg, csr, csr_error_bits;
 	irqreturn_t ret = IRQ_NONE;
-	u16 stat;
+	u32 stat;
 
 	csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL;
 	csr = upa_readq(csr_reg);
@@ -617,7 +617,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
 			       pbm->name);
 		ret = IRQ_HANDLED;
 	}
-	pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
+	pbm->pci_ops->read(pbm->pci_bus, 0, PCI_STATUS, 2, &stat);
 	if (stat & (PCI_STATUS_PARITY |
 		    PCI_STATUS_SIG_TARGET_ABORT |
 		    PCI_STATUS_REC_TARGET_ABORT |
@@ -625,7 +625,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
 		    PCI_STATUS_SIG_SYSTEM_ERROR)) {
 		printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
 		       pbm->name, stat);
-		pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
+		pbm->pci_ops->write(pbm->pci_bus, 0, PCI_STATUS, 2, 0xffff);
 		ret = IRQ_HANDLED;
 	}
 	return ret;
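Fix #1: the error handler read PCI_STATUS through pbm->pci_bus->self, but the bus directly under a Schizo PBM has no upstream pci_dev, so ->self is NULL and the error interrupt crashed in pci_read_config_word(). The replacement calls the PBM's raw config accessors on that bus at devfn 0 instead, and `stat` widens to u32 because those accessors always hand back a 32-bit value regardless of the access size. For reference, the accessor shape being relied on here (paraphrased from the generic PCI core; only the two members used above are shown):

struct pci_ops {
	int (*read)(struct pci_bus *bus, unsigned int devfn,
		    int where, int size, u32 *val);
	int (*write)(struct pci_bus *bus, unsigned int devfn,
		     int where, int size, u32 val);
};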
@@ -816,13 +816,17 @@ void arch_send_call_function_single_ipi(int cpu)
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
+	irq_enter();
 	generic_smp_call_function_interrupt();
+	irq_exit();
 }
 
 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
+	irq_enter();
 	generic_smp_call_function_single_interrupt();
+	irq_exit();
 }
 
 static void tsb_sync(void *info)
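Fix #4: these softint handlers are entered straight from the sparc64 trap path, so nothing had called irq_enter() before the generic cross-call code ran. On an idle CPU, RCU treats the core as being in an extended quiescent state until irq_enter() says otherwise, and the generic handlers may legitimately use RCU, hence the warnings. Bracketing the dispatch with irq_enter()/irq_exit() marks the section as interrupt context and lets irq_exit() run any softirqs raised inside it. The same pattern, sketched for any hand-rolled IPI handler (hypothetical name, not kernel code):

#include <linux/hardirq.h>
#include <linux/smp.h>

/* Hypothetical hand-rolled IPI handler: if the low-level entry code did
 * not already account for interrupt context, do it here before touching
 * anything that may rely on RCU or raise softirqs. */
static void example_ipi_handler(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}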
@@ -45,6 +45,19 @@ ATOMIC_OP(add, +=)
 
 #undef ATOMIC_OP
 
+int atomic_xchg(atomic_t *v, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	ret = v->counter;
+	v->counter = new;
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	return ret;
+}
+EXPORT_SYMBOL(atomic_xchg);
+
 int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int ret;
@@ -137,3 +150,17 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
 	return (unsigned long)prev;
 }
 EXPORT_SYMBOL(__cmpxchg_u32);
+
+unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
+{
+	unsigned long flags;
+	u32 prev;
+
+	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+	prev = *ptr;
+	*ptr = new;
+	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+
+	return (unsigned long)prev;
+}
+EXPORT_SYMBOL(__xchg_u32);
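That completes fix #2: both new helpers serialize on ATOMIC_HASH(ptr) like every other emulated sparc32 atomic and return the previous value, so the usual exchange idioms behave as expected. A small hypothetical usage sketch:

#include <linux/atomic.h>
#include <linux/types.h>

/* Hypothetical caller: claim a pending-work flag exactly once.  Because
 * atomic_xchg() returns the previous value and now takes the same hash
 * lock as the other sparc32 atomics, only one CPU can observe the
 * 1 -> 0 transition even if others are concurrently setting the flag. */
static atomic_t pending = ATOMIC_INIT(0);

static bool claim_pending_work(void)
{
	return atomic_xchg(&pending, 0) != 0;
}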