/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * SGI UV IRQ functions * * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved. */ #include #include #include #include #include #include /* MMR offset and pnode of hub sourcing interrupts for a given irq */ struct uv_irq_2_mmr_pnode{ struct rb_node list; unsigned long offset; int pnode; int irq; }; static spinlock_t uv_irq_lock; static struct rb_root uv_irq_root; static void uv_noop(unsigned int irq) { } static unsigned int uv_noop_ret(unsigned int irq) { return 0; } static void uv_ack_apic(unsigned int irq) { ack_APIC_irq(); } struct irq_chip uv_irq_chip = { .name = "UV-CORE", .startup = uv_noop_ret, .shutdown = uv_noop, .enable = uv_noop, .disable = uv_noop, .ack = uv_noop, .mask = uv_noop, .unmask = uv_noop, .eoi = uv_ack_apic, .end = uv_noop, .set_affinity = uv_set_irq_affinity, }; /* * Add offset and pnode information of the hub sourcing interrupts to the * rb tree for a specific irq. */ static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade) { struct rb_node **link = &uv_irq_root.rb_node; struct rb_node *parent = NULL; struct uv_irq_2_mmr_pnode *n; struct uv_irq_2_mmr_pnode *e; unsigned long irqflags; n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL, uv_blade_to_memory_nid(blade)); if (!n) return -ENOMEM; n->irq = irq; n->offset = offset; n->pnode = uv_blade_to_pnode(blade); spin_lock_irqsave(&uv_irq_lock, irqflags); /* Find the right place in the rbtree: */ while (*link) { parent = *link; e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list); if (unlikely(irq == e->irq)) { /* irq entry exists */ e->pnode = uv_blade_to_pnode(blade); e->offset = offset; spin_unlock_irqrestore(&uv_irq_lock, irqflags); kfree(n); return 0; } if (irq < e->irq) link = &(*link)->rb_left; else link = &(*link)->rb_right; } /* Insert the node into the rbtree. 
*/ rb_link_node(&n->list, parent, link); rb_insert_color(&n->list, &uv_irq_root); spin_unlock_irqrestore(&uv_irq_lock, irqflags); return 0; } /* Retrieve offset and pnode information from the rb tree for a specific irq */ int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode) { struct uv_irq_2_mmr_pnode *e; struct rb_node *n; unsigned long irqflags; spin_lock_irqsave(&uv_irq_lock, irqflags); n = uv_irq_root.rb_node; while (n) { e = rb_entry(n, struct uv_irq_2_mmr_pnode, list); if (e->irq == irq) { *offset = e->offset; *pnode = e->pnode; spin_unlock_irqrestore(&uv_irq_lock, irqflags); return 0; } if (irq < e->irq) n = n->rb_left; else n = n->rb_right; } spin_unlock_irqrestore(&uv_irq_lock, irqflags); return -1; } /* * Set up a mapping of an available irq and vector, and enable the specified * MMR that defines the MSI that is to be sent to the specified CPU when an * interrupt is raised. */ int uv_setup_irq(char *irq_name, int cpu, int mmr_blade, unsigned long mmr_offset, int restrict) { int irq, ret; irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade)); if (irq <= 0) return -EBUSY; ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset, restrict); if (ret == irq) uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade); else destroy_irq(irq); return ret; } EXPORT_SYMBOL_GPL(uv_setup_irq); /* * Tear down a mapping of an irq and vector, and disable the specified MMR that * defined the MSI that was to be sent to the specified CPU when an interrupt * was raised. * * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq(). 
*/
void uv_teardown_irq(unsigned int irq)
{
	struct uv_irq_2_mmr_pnode *entry;
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&uv_irq_lock, flags);

	/* Standard rbtree lookup keyed on the irq number. */
	node = uv_irq_root.rb_node;
	while (node) {
		entry = rb_entry(node, struct uv_irq_2_mmr_pnode, list);

		if (entry->irq == irq) {
			/* Disable the MMR, then drop the cached mapping. */
			arch_disable_uv_irq(entry->pnode, entry->offset);
			rb_erase(node, &uv_irq_root);
			kfree(entry);
			break;
		}

		node = (irq < entry->irq) ? node->rb_left : node->rb_right;
	}

	spin_unlock_irqrestore(&uv_irq_lock, flags);
	destroy_irq(irq);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);