commit 43154d08d6
Author: David S. Miller
Date:   2008-05-25 23:26:10 -07:00

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:

	drivers/net/cpmac.c
	net/mac80211/mlme.c

93 changed files with 1106 additions and 1071 deletions


@@ -1,4 +1,3 @@
-/* $Id: fore200e.h,v 1.4 2000/04/14 10:10:34 davem Exp $ */
 #ifndef _FORE200E_H
 #define _FORE200E_H


@@ -1,6 +1,4 @@
 /*
-  $Id: fore200e_mkfirm.c,v 1.1 2000/02/21 16:04:32 davem Exp $
-
   mkfirm.c: generates a C readable file from a binary firmware image
   Christophe Lizzi (lizzi@{csti.fr, cnam.fr}), June 1999.


@@ -1,5 +1,3 @@
-/* $Id: he.h,v 1.4 2003/05/06 22:48:00 chas Exp $ */
-
 /*
   he.h


@@ -1,8 +1,4 @@
 /*******************************************************************
- * ident "$Id: idt77252.c,v 1.2 2001/11/11 08:13:54 ecd Exp $"
- *
- * $Author: ecd $
- * $Date: 2001/11/11 08:13:54 $
  *
  * Copyright (c) 2000 ATecoM GmbH
  *
@@ -29,9 +25,6 @@
  * 675 Mass Ave, Cambridge, MA 02139, USA.
  *
  *******************************************************************/

-static char const rcsid[] =
-"$Id: idt77252.c,v 1.2 2001/11/11 08:13:54 ecd Exp $";
-
 #include <linux/module.h>
 #include <linux/pci.h>


@@ -1,8 +1,4 @@
 /*******************************************************************
- * ident "$Id: idt77252.h,v 1.2 2001/11/11 08:13:54 ecd Exp $"
- *
- * $Author: ecd $
- * $Date: 2001/11/11 08:13:54 $
  *
  * Copyright (c) 2000 ATecoM GmbH
  *


@@ -13,7 +13,7 @@
  *
  * Modified to work with the IDT7721 nicstar -- AAL5 (tested) only.
  *
- * R. D. Rechenmacher <ron@fnal.gov>, Aug. 6, 1997 $Revision: 1.1 $ $Date: 1999/08/20 11:00:11 $
+ * R. D. Rechenmacher <ron@fnal.gov>, Aug. 6, 1997
  *
  * Linux driver for the IDT77201 NICStAR PCI ATM controller.
  * PHY component is expected to be 155 Mbps S/UNI-Lite or IDT 77155;


@@ -1063,7 +1063,6 @@ el3_rx(struct net_device *dev)
 			struct sk_buff *skb;

 			skb = dev_alloc_skb(pkt_len+5);
-			dev->stats.rx_bytes += pkt_len;
 			if (el3_debug > 4)
 				printk("Receiving packet size %d status %4.4x.\n",
 				       pkt_len, rx_status);
@@ -1078,6 +1077,7 @@ el3_rx(struct net_device *dev)
 				skb->protocol = eth_type_trans(skb,dev);
 				netif_rx(skb);
 				dev->last_rx = jiffies;
+				dev->stats.rx_bytes += pkt_len;
 				dev->stats.rx_packets++;
 				continue;
 			}
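Note: the two hunks above move the rx_bytes update past the point where the skb
has actually been allocated and handed up, so a failed dev_alloc_skb() no longer
inflates the byte counter. A minimal sketch of that accounting rule, with a
hypothetical demo_rx() standing in for the driver's real receive path:

	static void demo_rx(struct net_device *dev, int pkt_len)
	{
		struct sk_buff *skb = dev_alloc_skb(pkt_len + 5);

		if (skb == NULL) {
			dev->stats.rx_dropped++;	/* allocation failed: nothing delivered */
			return;
		}
		/* ... copy the frame into the skb ... */
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_bytes += pkt_len;		/* count only frames actually delivered */
		dev->stats.rx_packets++;
	}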


@@ -1239,12 +1239,7 @@ static int au1000_rx(struct net_device *dev)
  */
 static irqreturn_t au1000_interrupt(int irq, void *dev_id)
 {
-	struct net_device *dev = (struct net_device *) dev_id;
-
-	if (dev == NULL) {
-		printk(KERN_ERR "%s: isr: null dev ptr\n", dev->name);
-		return IRQ_RETVAL(1);
-	}
+	struct net_device *dev = dev_id;

 	/* Handle RX interrupts first to minimize chance of overrun */


@@ -22,7 +22,6 @@
 #include <linux/crc32.h>
 #include <linux/device.h>
 #include <linux/spinlock.h>
-#include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/phy.h>
 #include <linux/netdevice.h>


@@ -142,8 +142,8 @@
 #define DRV_MODULE_NAME		"cassini"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"1.5"
-#define DRV_MODULE_RELDATE	"4 Jan 2008"
+#define DRV_MODULE_VERSION	"1.6"
+#define DRV_MODULE_RELDATE	"21 May 2008"

 #define CAS_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
@@ -2136,9 +2136,12 @@ end_copy_pkt:
 		if (addr)
 			cas_page_unmap(addr);
 	}
-	skb->csum = csum_unfold(~csum);
-	skb->ip_summed = CHECKSUM_COMPLETE;
 	skb->protocol = eth_type_trans(skb, cp->dev);
+	if (skb->protocol == htons(ETH_P_IP)) {
+		skb->csum = csum_unfold(~csum);
+		skb->ip_summed = CHECKSUM_COMPLETE;
+	} else
+		skb->ip_summed = CHECKSUM_NONE;
 	return len;
 }
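Note: the cassini hunk above stops advertising CHECKSUM_COMPLETE unconditionally;
the hardware sum is handed to the stack only for IPv4 frames, and everything else
is downgraded to CHECKSUM_NONE so the stack verifies in software. The arithmetic
behind it, as a hedged sketch (demo_rx_csum() is hypothetical, not the driver's):

	/* the hardware reports a 16-bit one's-complement sum over the payload;
	 * CHECKSUM_COMPLETE expects its unfolded complement in skb->csum */
	static void demo_rx_csum(struct sk_buff *skb, __sum16 hw_csum,
				 struct net_device *dev)
	{
		skb->protocol = eth_type_trans(skb, dev);
		if (skb->protocol == htons(ETH_P_IP)) {
			skb->csum = csum_unfold(~hw_csum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		} else {
			skb->ip_summed = CHECKSUM_NONE;	/* let the stack check it */
		}
	}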


@@ -38,6 +38,7 @@
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <asm/gpio.h>
+#include <asm/atomic.h>

 MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
 MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
@@ -187,6 +188,7 @@ struct cpmac_desc {
 #define CPMAC_EOQ			0x1000
 	struct sk_buff *skb;
 	struct cpmac_desc *next;
+	struct cpmac_desc *prev;
 	dma_addr_t mapping;
 	dma_addr_t data_mapping;
 };
@@ -208,6 +210,7 @@ struct cpmac_priv {
 	struct work_struct reset_work;
 	struct platform_device *pdev;
 	struct napi_struct napi;
+	atomic_t reset_pending;
 };

 static irqreturn_t cpmac_irq(int, void *);
@@ -241,6 +244,16 @@ static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
 	printk("\n");
 }

+static void cpmac_dump_all_desc(struct net_device *dev)
+{
+	struct cpmac_priv *priv = netdev_priv(dev);
+	struct cpmac_desc *dump = priv->rx_head;
+	do {
+		cpmac_dump_desc(dev, dump);
+		dump = dump->next;
+	} while (dump != priv->rx_head);
+}
+
 static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
 {
 	int i;
@@ -412,21 +425,42 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
 static int cpmac_poll(struct napi_struct *napi, int budget)
 {
 	struct sk_buff *skb;
-	struct cpmac_desc *desc;
-	int received = 0;
+	struct cpmac_desc *desc, *restart;
 	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
+	int received = 0, processed = 0;

 	spin_lock(&priv->rx_lock);
 	if (unlikely(!priv->rx_head)) {
 		if (netif_msg_rx_err(priv) && net_ratelimit())
 			printk(KERN_WARNING "%s: rx: polling, but no queue\n",
 			       priv->dev->name);
+		spin_unlock(&priv->rx_lock);
 		netif_rx_complete(priv->dev, napi);
 		return 0;
 	}

 	desc = priv->rx_head;
+	restart = NULL;
 	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
+		processed++;
+
+		if ((desc->dataflags & CPMAC_EOQ) != 0) {
+			/* The last update to eoq->hw_next didn't happen
+			 * soon enough, and the receiver stopped here.
+			 * Remember this descriptor so we can restart
+			 * the receiver after freeing some space.
+			 */
+			if (unlikely(restart)) {
+				if (netif_msg_rx_err(priv))
+					printk(KERN_ERR "%s: poll found a"
+					       " duplicate EOQ: %p and %p\n",
+					       priv->dev->name, restart, desc);
+				goto fatal_error;
+			}
+
+			restart = desc->next;
+		}
+
 		skb = cpmac_rx_one(priv, desc);
 		if (likely(skb)) {
 			netif_receive_skb(skb);
@@ -435,19 +469,90 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
 		desc = desc->next;
 	}

+	if (desc != priv->rx_head) {
+		/* We freed some buffers, but not the whole ring,
+		 * add what we did free to the rx list */
+		desc->prev->hw_next = (u32)0;
+		priv->rx_head->prev->hw_next = priv->rx_head->mapping;
+	}
+
+	/* Optimization: If we did not actually process an EOQ (perhaps because
+	 * of quota limits), check to see if the tail of the queue has EOQ set.
+	 * We should immediately restart in that case so that the receiver can
+	 * restart and run in parallel with more packet processing.
+	 * This lets us handle slightly larger bursts before running
+	 * out of ring space (assuming dev->weight < ring_size) */
+
+	if (!restart &&
+	    (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
+		    == CPMAC_EOQ &&
+	    (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
+		/* reset EOQ so the poll loop (above) doesn't try to
+		 * restart this when it eventually gets to this descriptor.
+		 */
+		priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
+		restart = priv->rx_head;
+	}
+
+	if (restart) {
+		priv->dev->stats.rx_errors++;
+		priv->dev->stats.rx_fifo_errors++;
+		if (netif_msg_rx_err(priv) && net_ratelimit())
+			printk(KERN_WARNING "%s: rx dma ring overrun\n",
+			       priv->dev->name);
+
+		if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
+			if (netif_msg_drv(priv))
+				printk(KERN_ERR "%s: cpmac_poll is trying to "
+				       "restart rx from a descriptor that's "
+				       "not free: %p\n",
+				       priv->dev->name, restart);
+			goto fatal_error;
+		}
+
+		cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
+	}
+
 	priv->rx_head = desc;
 	spin_unlock(&priv->rx_lock);
 	if (unlikely(netif_msg_rx_status(priv)))
 		printk(KERN_DEBUG "%s: poll processed %d packets\n",
 		       priv->dev->name, received);

-	if (desc->dataflags & CPMAC_OWN) {
+	if (processed == 0) {
+		/* we ran out of packets to read,
+		 * revert to interrupt-driven mode */
 		netif_rx_complete(priv->dev, napi);
-		cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping);
 		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
 		return 0;
 	}

 	return 1;
+
+fatal_error:
+	/* Something went horribly wrong.
+	 * Reset hardware to try to recover rather than wedging. */
+
+	if (netif_msg_drv(priv)) {
+		printk(KERN_ERR "%s: cpmac_poll is confused. "
+		       "Resetting hardware\n", priv->dev->name);
+		cpmac_dump_all_desc(priv->dev);
+		printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
+		       priv->dev->name,
+		       cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
+		       cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
+	}
+
+	spin_unlock(&priv->rx_lock);
+	netif_rx_complete(priv->dev, napi);
+	netif_stop_queue(priv->dev);
+	napi_disable(&priv->napi);
+
+	atomic_inc(&priv->reset_pending);
+	cpmac_hw_stop(priv->dev);
+	if (!schedule_work(&priv->reset_work))
+		atomic_dec(&priv->reset_pending);
+	return 0;
 }

 static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
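Note: the reworked cpmac_poll() above turns the rx list into a doubly linked
ring (the new prev pointer) so that, on an EOQ overrun, freed descriptors can
be re-chained and the receiver re-armed at the first free slot. A hedged sketch
of that restart idea, with hypothetical demo_* types standing in for the
driver's descriptors and register accessors:

	struct demo_desc {
		u32 dataflags;		/* OWN/EOQ style status bits */
		u32 hw_next;		/* dma address of next descriptor, 0 = end of chain */
		u32 mapping;		/* dma address of this descriptor */
		struct demo_desc *next, *prev;
	};

	/* after the poll loop gave [head .. desc) back to hardware: close the
	 * chain at the last freed slot and point the old tail at the new head,
	 * mirroring the desc->prev->hw_next updates in the hunk above */
	static void demo_requeue(struct demo_desc *head, struct demo_desc *desc)
	{
		desc->prev->hw_next = 0;
		head->prev->hw_next = head->mapping;
	}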
@@ -456,6 +561,9 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct cpmac_desc *desc;
 	struct cpmac_priv *priv = netdev_priv(dev);

+	if (unlikely(atomic_read(&priv->reset_pending)))
+		return NETDEV_TX_BUSY;
+
 	if (unlikely(skb_padto(skb, ETH_ZLEN)))
 		return NETDEV_TX_OK;
@@ -621,8 +729,10 @@ static void cpmac_clear_rx(struct net_device *dev)
 			desc->dataflags = CPMAC_OWN;
 			dev->stats.rx_dropped++;
 		}
+		desc->hw_next = desc->next->mapping;
 		desc = desc->next;
 	}
+	priv->rx_head->prev->hw_next = 0;
 }

 static void cpmac_clear_tx(struct net_device *dev)
@@ -635,14 +745,14 @@ static void cpmac_clear_tx(struct net_device *dev)
 		priv->desc_ring[i].dataflags = 0;
 		if (priv->desc_ring[i].skb) {
 			dev_kfree_skb_any(priv->desc_ring[i].skb);
-			if (netif_subqueue_stopped(dev, i))
-				netif_wake_subqueue(dev, i);
+			priv->desc_ring[i].skb = NULL;
 		}
 	}
 }

 static void cpmac_hw_error(struct work_struct *work)
 {
+	int i;
 	struct cpmac_priv *priv =
 		container_of(work, struct cpmac_priv, reset_work);
@@ -651,8 +761,48 @@ static void cpmac_hw_error(struct work_struct *work)
 	spin_unlock(&priv->rx_lock);
 	cpmac_clear_tx(priv->dev);
 	cpmac_hw_start(priv->dev);
-	napi_enable(&priv->napi);
-	netif_start_queue(priv->dev);
+	barrier();
+	atomic_dec(&priv->reset_pending);
+
+	for (i = 0; i < CPMAC_QUEUES; i++)
+		netif_wake_subqueue(priv->dev, i);
+	netif_wake_queue(priv->dev);
+	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
+}
+
+static void cpmac_check_status(struct net_device *dev)
+{
+	struct cpmac_priv *priv = netdev_priv(dev);
+
+	u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
+	int rx_channel = (macstatus >> 8) & 7;
+	int rx_code = (macstatus >> 12) & 15;
+	int tx_channel = (macstatus >> 16) & 7;
+	int tx_code = (macstatus >> 20) & 15;
+
+	if (rx_code || tx_code) {
+		if (netif_msg_drv(priv) && net_ratelimit()) {
+			/* Can't find any documentation on what these
+			 * error codes actually are. So just log them and hope..
+			 */
+			if (rx_code)
+				printk(KERN_WARNING "%s: host error %d on rx "
+				       "channel %d (macstatus %08x), resetting\n",
+				       dev->name, rx_code, rx_channel, macstatus);
+			if (tx_code)
+				printk(KERN_WARNING "%s: host error %d on tx "
+				       "channel %d (macstatus %08x), resetting\n",
+				       dev->name, tx_code, tx_channel, macstatus);
+		}
+
+		netif_stop_queue(dev);
+		cpmac_hw_stop(dev);
+		if (schedule_work(&priv->reset_work))
+			atomic_inc(&priv->reset_pending);
+		if (unlikely(netif_msg_hw(priv)))
+			cpmac_dump_regs(dev);
+	}
+	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
 }

 static irqreturn_t cpmac_irq(int irq, void *dev_id)
@@ -683,49 +833,32 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
 	cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);

-	if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) {
-		if (netif_msg_drv(priv) && net_ratelimit())
-			printk(KERN_ERR "%s: hw error, resetting...\n",
-			       dev->name);
-		netif_stop_queue(dev);
-		napi_disable(&priv->napi);
-		cpmac_hw_stop(dev);
-		schedule_work(&priv->reset_work);
-		if (unlikely(netif_msg_hw(priv)))
-			cpmac_dump_regs(dev);
-	}
+	if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
+		cpmac_check_status(dev);

 	return IRQ_HANDLED;
 }

 static void cpmac_tx_timeout(struct net_device *dev)
 {
-	struct cpmac_priv *priv = netdev_priv(dev);
 	int i;
+	struct cpmac_priv *priv = netdev_priv(dev);

 	spin_lock(&priv->lock);
 	dev->stats.tx_errors++;
 	spin_unlock(&priv->lock);
 	if (netif_msg_tx_err(priv) && net_ratelimit())
 		printk(KERN_WARNING "%s: transmit timeout\n", dev->name);
-	/*
-	 * FIXME: waking up random queue is not the best thing to
-	 * do... on the other hand why we got here at all?
-	 */
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+
+	atomic_inc(&priv->reset_pending);
+	barrier();
+	cpmac_clear_tx(dev);
+	barrier();
+	atomic_dec(&priv->reset_pending);
+
+	netif_wake_queue(priv->dev);
 	for (i = 0; i < CPMAC_QUEUES; i++)
-		if (priv->desc_ring[i].skb) {
-			priv->desc_ring[i].dataflags = 0;
-			dev_kfree_skb_any(priv->desc_ring[i].skb);
-			netif_wake_subqueue(dev, i);
-			break;
-		}
-#else
-	priv->desc_ring[0].dataflags = 0;
-	if (priv->desc_ring[0].skb)
-		dev_kfree_skb_any(priv->desc_ring[0].skb);
-	netif_wake_queue(dev);
-#endif
+		netif_wake_subqueue(dev, i);
 }

 static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
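Note: the reset_pending counter introduced above is the synchronization point
between the fast paths and the reset paths: start_xmit refuses work with
NETDEV_TX_BUSY while a reset is in flight, and the error worker drops the
count only once the hardware is usable again. In miniature (hypothetical
demo_* names, not the driver's own):

	static atomic_t demo_reset_pending = ATOMIC_INIT(0);

	static int demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		if (unlikely(atomic_read(&demo_reset_pending)))
			return NETDEV_TX_BUSY;	/* core will retry the skb later */
		/* ... normal descriptor setup and doorbell write ... */
		return NETDEV_TX_OK;
	}

	static void demo_error_worker(struct work_struct *work)
	{
		/* ... stop hw, drain rings, restart hw ... */
		barrier();			/* restart must be visible before unblocking tx */
		atomic_dec(&demo_reset_pending);
	}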
@@ -901,9 +1034,12 @@ static int cpmac_open(struct net_device *dev)
 		desc->buflen = CPMAC_SKB_SIZE;
 		desc->dataflags = CPMAC_OWN;
 		desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
+		desc->next->prev = desc;
 		desc->hw_next = (u32)desc->next->mapping;
 	}

+	priv->rx_head->prev->hw_next = (u32)0;
+
 	if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED,
 			       dev->name, dev))) {
 		if (netif_msg_drv(priv))
@@ -912,6 +1048,7 @@ static int cpmac_open(struct net_device *dev)
 		goto fail_irq;
 	}

+	atomic_set(&priv->reset_pending, 0);
 	INIT_WORK(&priv->reset_work, cpmac_hw_error);
 	cpmac_hw_start(dev);
@@ -1007,21 +1144,10 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
 	if (phy_id == PHY_MAX_ADDR) {
 		if (external_switch || dumb_switch) {
-			struct fixed_phy_status status = {};
-
-			/*
-			 * FIXME: this should be in the platform code!
-			 * Since there is not platform code at all (that is,
-			 * no mainline users of that driver), place it here
-			 * for now.
-			 */
-			phy_id = 0;
-			status.link = 1;
-			status.duplex = 1;
-			status.speed = 100;
-			fixed_phy_add(PHY_POLL, phy_id, &status);
+			mdio_bus_id = 0; /* fixed phys bus */
+			phy_id = pdev->id;
 		} else {
-			printk(KERN_ERR "cpmac: no PHY present\n");
+			dev_err(&pdev->dev, "no PHY present\n");
 			return -ENODEV;
 		}
 	}
@@ -1064,10 +1190,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
 	priv->msg_enable = netif_msg_init(debug_level, 0xff);
 	memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr));

-	snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
-
-	priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0,
-				PHY_INTERFACE_MODE_MII);
+	priv->phy = phy_connect(dev, cpmac_mii.phy_map[phy_id]->dev.bus_id,
+				&cpmac_adjust_link, 0, PHY_INTERFACE_MODE_MII);
+
 	if (IS_ERR(priv->phy)) {
 		if (netif_msg_drv(priv))
 			printk(KERN_ERR "%s: Could not attach to PHY\n",


@@ -903,7 +903,7 @@ dm9000_stop(struct net_device *ndev)
 	if (netif_msg_ifdown(db))
 		dev_dbg(db->dev, "shutting down %s\n", ndev->name);

-	cancel_delayed_work(&db->phy_poll);
+	cancel_delayed_work_sync(&db->phy_poll);

 	netif_stop_queue(ndev);
 	netif_carrier_off(ndev);
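Note: plain cancel_delayed_work() only removes an instance that has not started
yet; a handler already running on another CPU keeps going and, if it re-queues
itself (as a phy poller typically does), can fire after the device is down. The
_sync variant waits for a running instance to finish and blocks re-arming. A
hedged sketch with a hypothetical demo_priv layout:

	struct demo_priv {
		struct delayed_work phy_poll;	/* self-rescheduling poller */
	};

	static int demo_stop(struct net_device *ndev)
	{
		struct demo_priv *db = netdev_priv(ndev);

		/* after this returns, the handler is guaranteed not to be
		 * mid-flight and cannot have re-queued itself */
		cancel_delayed_work_sync(&db->phy_poll);
		netif_stop_queue(ndev);
		return 0;
	}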


@@ -4201,8 +4201,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	struct e1000_adapter *adapter;
 	struct e1000_hw *hw;
 	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
-	unsigned long mmio_start, mmio_len;
-	unsigned long flash_start, flash_len;
+	resource_size_t mmio_start, mmio_len;
+	resource_size_t flash_start, flash_len;

 	static int cards_found;
 	int i, err, pci_using_dac;


@@ -2213,8 +2213,6 @@ static void ehea_vlan_rx_register(struct net_device *dev,
 		goto out;
 	}

-	memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter));
-
 	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
 				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
 	if (hret != H_SUCCESS)
@@ -3178,11 +3176,12 @@ out_err:
 static void ehea_shutdown_single_port(struct ehea_port *port)
 {
+	struct ehea_adapter *adapter = port->adapter;
+
 	unregister_netdev(port->netdev);
 	ehea_unregister_port(port);
 	kfree(port->mc_list);
 	free_netdev(port->netdev);
-	port->adapter->active_ports--;
+	adapter->active_ports--;
 }

 static int ehea_setup_ports(struct ehea_adapter *adapter)
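Note: the second ehea hunk fixes what looks like a use-after-free: port lives
inside the netdev's private area, so port->adapter must not be dereferenced
after free_netdev(port->netdev). Caching the pointer first, in miniature
(using the hunk's own identifiers):

	struct ehea_adapter *adapter = port->adapter;	/* read before the free */

	unregister_netdev(port->netdev);
	free_netdev(port->netdev);	/* 'port' memory is gone after this */
	adapter->active_ports--;	/* safe: uses the cached pointer */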


@@ -5823,6 +5823,7 @@ static int nv_resume(struct pci_dev *pdev)
 	writel(txreg, base + NvRegTransmitPoll);

 	rc = nv_open(dev);
+	nv_set_multicast(dev);
 out:
 	return rc;
 }


@@ -1093,7 +1093,7 @@ err:
 		if (registered)
 			unregister_netdev(ndev);

-		if (fep != NULL) {
+		if (fep && fep->ops) {
 			(*fep->ops->free_bd)(ndev);
 			(*fep->ops->cleanup_data)(ndev);
 		}


@@ -1340,9 +1340,10 @@ static unsigned int scc_set_param(struct scc_channel *scc, unsigned int cmd, unsigned int arg)
 		case PARAM_RTS:
 			if ( !(scc->wreg[R5] & RTS) )
 			{
-				if (arg != TX_OFF)
+				if (arg != TX_OFF) {
 					scc_key_trx(scc, TX_ON);
-				scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay);
+					scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay);
+				}
 			} else {
 				if (arg == TX_OFF)
 				{
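Note: a classic missing-braces bug. Without the braces only the first statement
is guarded, so the tx-delay timer was started even when the transmitter was not
being keyed:

	if (arg != TX_OFF)
		scc_key_trx(scc, TX_ON);
	scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay);	/* ran unconditionally */

With the braces added, both keying and the timer start happen only for TX_ON,
which is what the surrounding RTS logic expects.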


@@ -631,7 +631,7 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
 	return status;
 }

-int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
+static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
 {
 	struct myri10ge_cmd cmd;
 	int status;


@@ -391,7 +391,9 @@ static int fmvj18x_config(struct pcmcia_device *link)
 		cardtype = CONTEC;
 		break;
 	case MANFID_FUJITSU:
-		if (link->card_id == PRODID_FUJITSU_MBH10302)
+		if (link->conf.ConfigBase == 0x0fe0)
+			cardtype = MBH10302;
+		else if (link->card_id == PRODID_FUJITSU_MBH10302)
 			/* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302),
 			   but these are MBH10304 based card. */
 			cardtype = MBH10304;


@@ -1461,22 +1461,25 @@ static void
 set_multicast_list(struct net_device *dev)
 {
 	unsigned int ioaddr = dev->base_addr;
+	unsigned value;

 	SelectPage(0x42);
+	value = GetByte(XIRCREG42_SWC1) & 0xC0;
+
 	if (dev->flags & IFF_PROMISC) { /* snoop */
-		PutByte(XIRCREG42_SWC1, 0x06); /* set MPE and PME */
+		PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */
 	} else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) {
-		PutByte(XIRCREG42_SWC1, 0x02); /* set MPE */
+		PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */
 	} else if (dev->mc_count) {
 		/* the chip can filter 9 addresses perfectly */
-		PutByte(XIRCREG42_SWC1, 0x01);
+		PutByte(XIRCREG42_SWC1, value | 0x01);
 		SelectPage(0x40);
 		PutByte(XIRCREG40_CMD0, Offline);
 		set_addresses(dev);
 		SelectPage(0x40);
 		PutByte(XIRCREG40_CMD0, EnableRecv | Online);
 	} else { /* standard usage */
-		PutByte(XIRCREG42_SWC1, 0x00);
+		PutByte(XIRCREG42_SWC1, value | 0x00);
 	}
 	SelectPage(0);
 }
@@ -1722,6 +1725,7 @@ do_reset(struct net_device *dev, int full)

 	/* enable receiver and put the mac online */
 	if (full) {
+		set_multicast_list(dev);
 		SelectPage(0x40);
 		PutByte(XIRCREG40_CMD0, EnableRecv | Online);
 	}
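Note: both xirc2ps hunks implement a read-modify-write on SWC1. The top two bits
of that register appear to carry media/transceiver settings chosen elsewhere, so
the multicast path now reads them back and ORs in only the mode bits instead of
writing absolute values (shown here with the driver's real GetByte/PutByte
accessors):

	value = GetByte(XIRCREG42_SWC1) & 0xC0;	/* preserve bits 7:6 */
	PutByte(XIRCREG42_SWC1, value | 0x02);	/* set MPE, keep the rest */

Calling set_multicast_list() from do_reset() then re-applies the filter mode
after a full reset, instead of leaving SWC1 at its power-on default.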


@@ -325,7 +325,7 @@ static int pcnet32_get_regs_len(struct net_device *dev);
 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 			     void *ptr);
 static void pcnet32_purge_tx_ring(struct net_device *dev);
-static int pcnet32_alloc_ring(struct net_device *dev, char *name);
+static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
 static void pcnet32_free_ring(struct net_device *dev);
 static void pcnet32_check_media(struct net_device *dev, int verbose);
@@ -1983,7 +1983,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 }

 /* if any allocation fails, caller must also call pcnet32_free_ring */
-static int pcnet32_alloc_ring(struct net_device *dev, char *name)
+static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
 {
 	struct pcnet32_private *lp = netdev_priv(dev);


@@ -5,7 +5,7 @@
 menuconfig PHYLIB
 	tristate "PHY Device support and infrastructure"
 	depends on !S390
-	depends on NET_ETHERNET && (BROKEN || !S390)
+	depends on NET_ETHERNET
 	help
 	  Ethernet controllers are usually attached to PHY
 	  devices.  This option provides infrastructure for


@@ -207,6 +207,7 @@ int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)

 	return 0;
 }
+EXPORT_SYMBOL(get_phy_id);

 /**
  * get_phy_device - reads the specified PHY device and returns its @phy_device struct


@@ -980,6 +980,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	__wsum csum = 0;
 	struct udphdr *uh;
 	unsigned int len;
+	int old_headroom;
+	int new_headroom;

 	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
 		goto abort;
@@ -1001,16 +1003,18 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	/* Check that there's enough headroom in the skb to insert IP,
 	 * UDP and L2TP and PPP headers. If not enough, expand it to
-	 * make room. Note that a new skb (or a clone) is
-	 * allocated. If we return an error from this point on, make
-	 * sure we free the new skb but do not free the original skb
-	 * since that is done by the caller for the error case.
+	 * make room. Adjust truesize.
 	 */
 	headroom = NET_SKB_PAD + sizeof(struct iphdr) +
 		sizeof(struct udphdr) + hdr_len + sizeof(ppph);
+	old_headroom = skb_headroom(skb);
 	if (skb_cow_head(skb, headroom))
 		goto abort;

+	new_headroom = skb_headroom(skb);
+	skb_orphan(skb);
+	skb->truesize += new_headroom - old_headroom;
+
 	/* Setup PPP header */
 	__skb_push(skb, sizeof(ppph));
 	skb->data[0] = ppph[0];
@@ -1065,7 +1069,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	/* Get routing info from the tunnel socket */
 	dst_release(skb->dst);
 	skb->dst = dst_clone(__sk_dst_get(sk_tun));
-	skb_orphan(skb);
 	skb->sk = sk_tun;

 	/* Queue the packet to IP for output */
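Note: skb_cow_head() may reallocate the skb head to create the requested
headroom, which grows the skb's real memory footprint. The change above measures
the headroom delta and adds it to skb->truesize so socket memory accounting
stays honest, and it orphans the skb from its original socket before the size
changes (moving the skb_orphan() call up from the routing section):

	old_headroom = skb_headroom(skb);
	if (skb_cow_head(skb, headroom))	/* may reallocate the head */
		goto abort;
	new_headroom = skb_headroom(skb);
	skb_orphan(skb);			/* release the old socket's accounting */
	skb->truesize += new_headroom - old_headroom;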


@@ -250,7 +250,7 @@ struct XENA_dev_config {
 	u64 tx_mat0_n[0x8];
 #define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8)

-	u8 unused_1[0x8];
+	u64 xmsi_mask_reg;
 	u64 stat_byte_cnt;
 #define STAT_BC(n) vBIT(n,4,12)


@@ -86,7 +86,7 @@
 #include "s2io.h"
 #include "s2io-regs.h"

-#define DRV_VERSION "2.0.26.23"
+#define DRV_VERSION "2.0.26.24"

 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";
@@ -1113,9 +1113,10 @@ static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
 	struct pci_dev *tdev = NULL;
 	while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
 		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
-			if (tdev->bus == s2io_pdev->bus->parent)
+			if (tdev->bus == s2io_pdev->bus->parent) {
 				pci_dev_put(tdev);
 				return 1;
+			}
 		}
 	}
 	return 0;
@@ -1219,15 +1220,33 @@ static int init_tti(struct s2io_nic *nic, int link)
 			TTI_DATA1_MEM_TX_URNG_B(0x10) |
 			TTI_DATA1_MEM_TX_URNG_C(0x30) |
 			TTI_DATA1_MEM_TX_TIMER_AC_EN;
+		if (i == 0)
 			if (use_continuous_tx_intrs && (link == LINK_UP))
 				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;

 		writeq(val64, &bar0->tti_data1_mem);

-		val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
-		    TTI_DATA2_MEM_TX_UFC_B(0x20) |
-		    TTI_DATA2_MEM_TX_UFC_C(0x40) |
-		    TTI_DATA2_MEM_TX_UFC_D(0x80);
+		if (nic->config.intr_type == MSI_X) {
+			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
+				TTI_DATA2_MEM_TX_UFC_B(0x100) |
+				TTI_DATA2_MEM_TX_UFC_C(0x200) |
+				TTI_DATA2_MEM_TX_UFC_D(0x300);
+		} else {
+			if ((nic->config.tx_steering_type ==
+			     TX_DEFAULT_STEERING) &&
+			    (config->tx_fifo_num > 1) &&
+			    (i >= nic->udp_fifo_idx) &&
+			    (i < (nic->udp_fifo_idx +
+				  nic->total_udp_fifos)))
+				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
+					TTI_DATA2_MEM_TX_UFC_B(0x80) |
+					TTI_DATA2_MEM_TX_UFC_C(0x100) |
+					TTI_DATA2_MEM_TX_UFC_D(0x120);
+			else
+				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
+					TTI_DATA2_MEM_TX_UFC_B(0x20) |
+					TTI_DATA2_MEM_TX_UFC_C(0x40) |
+					TTI_DATA2_MEM_TX_UFC_D(0x80);
+		}

 		writeq(val64, &bar0->tti_data2_mem);
@@ -2813,6 +2832,15 @@ static void free_rx_buffers(struct s2io_nic *sp)
 	}
 }

+static int s2io_chk_rx_buffers(struct ring_info *ring)
+{
+	if (fill_rx_buffers(ring) == -ENOMEM) {
+		DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
+		DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
+	}
+	return 0;
+}
+
 /**
  * s2io_poll - Rx interrupt handler for NAPI support
  * @napi : pointer to the napi structure.
@@ -2826,57 +2854,72 @@ static void free_rx_buffers(struct s2io_nic *sp)
  * 0 on success and 1 if there are No Rx packets to be processed.
  */

-static int s2io_poll(struct napi_struct *napi, int budget)
+static int s2io_poll_msix(struct napi_struct *napi, int budget)
+{
+	struct ring_info *ring = container_of(napi, struct ring_info, napi);
+	struct net_device *dev = ring->dev;
+	struct config_param *config;
+	struct mac_info *mac_control;
+	int pkts_processed = 0;
+	u8 *addr = NULL, val8 = 0;
+	struct s2io_nic *nic = dev->priv;
+	struct XENA_dev_config __iomem *bar0 = nic->bar0;
+	int budget_org = budget;
+
+	config = &nic->config;
+	mac_control = &nic->mac_control;
+
+	if (unlikely(!is_s2io_card_up(nic)))
+		return 0;
+
+	pkts_processed = rx_intr_handler(ring, budget);
+	s2io_chk_rx_buffers(ring);
+
+	if (pkts_processed < budget_org) {
+		netif_rx_complete(dev, napi);
+		/*Re Enable MSI-Rx Vector*/
+		addr = (u8 *)&bar0->xmsi_mask_reg;
+		addr += 7 - ring->ring_no;
+		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
+		writeb(val8, addr);
+		val8 = readb(addr);
+	}
+	return pkts_processed;
+}
+
+static int s2io_poll_inta(struct napi_struct *napi, int budget)
 {
 	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
+	struct ring_info *ring;
 	struct net_device *dev = nic->dev;
-	int pkt_cnt = 0, org_pkts_to_process;
-	struct mac_info *mac_control;
 	struct config_param *config;
+	struct mac_info *mac_control;
+	int pkts_processed = 0;
+	int ring_pkts_processed, i;
 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
-	int i;
+	int budget_org = budget;

-	mac_control = &nic->mac_control;
 	config = &nic->config;
+	mac_control = &nic->mac_control;

-	nic->pkts_to_process = budget;
-	org_pkts_to_process = nic->pkts_to_process;
+	if (unlikely(!is_s2io_card_up(nic)))
+		return 0;

-	writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
-	readl(&bar0->rx_traffic_int);
 	for (i = 0; i < config->rx_ring_num; i++) {
-		rx_intr_handler(&mac_control->rings[i]);
-		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
-		if (!nic->pkts_to_process) {
-			/* Quota for the current iteration has been met */
-			goto no_rx;
-		}
-	}
-
-	netif_rx_complete(dev, napi);
-
-	for (i = 0; i < config->rx_ring_num; i++) {
-		if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
-			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
-			DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
+		ring = &mac_control->rings[i];
+		ring_pkts_processed = rx_intr_handler(ring, budget);
+		s2io_chk_rx_buffers(ring);
+		pkts_processed += ring_pkts_processed;
+		budget -= ring_pkts_processed;
+		if (budget <= 0)
 			break;
-		}
 	}
-	/* Re enable the Rx interrupts. */
-	writeq(0x0, &bar0->rx_traffic_mask);
-	readl(&bar0->rx_traffic_mask);
-	return pkt_cnt;
-
-no_rx:
-	for (i = 0; i < config->rx_ring_num; i++) {
-		if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
-			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
-			DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
-			break;
-		}
+	if (pkts_processed < budget_org) {
+		netif_rx_complete(dev, napi);
+		/* Re enable the Rx interrupts for the ring */
+		writeq(0, &bar0->rx_traffic_mask);
+		readl(&bar0->rx_traffic_mask);
 	}
-	return pkt_cnt;
+	return pkts_processed;
 }
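Note: the split above gives each rx ring its own NAPI context under MSI-X
(s2io_poll_msix) while the INTA path (s2io_poll_inta) walks every ring on a
shared budget. The budget arithmetic is the key part: polling must stop once
the quota is spent, and interrupts may be re-enabled only when all rings
drained inside the quota. A hedged, self-contained sketch with hypothetical
demo_* helpers:

	struct demo_ring { int pending; };	/* frames waiting on one ring */

	/* pretend to reap up to 'budget' frames from one ring */
	static int demo_ring_rx(struct demo_ring *r, int budget)
	{
		int n = r->pending < budget ? r->pending : budget;
		r->pending -= n;
		return n;
	}

	static int demo_poll_all(struct demo_ring *rings, int nrings, int budget)
	{
		int done = 0, i;

		for (i = 0; i < nrings; i++) {
			done += demo_ring_rx(&rings[i], budget - done);
			if (done >= budget)
				break;		/* quota spent: stay in polling mode */
		}
		/* caller re-enables interrupts only when done < budget */
		return done;
	}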
 #ifdef CONFIG_NET_POLL_CONTROLLER

@@ -2918,7 +2961,7 @@ static void s2io_netpoll(struct net_device *dev)
 	/* check for received packet and indicate up to network */
 	for (i = 0; i < config->rx_ring_num; i++)
-		rx_intr_handler(&mac_control->rings[i]);
+		rx_intr_handler(&mac_control->rings[i], 0);

 	for (i = 0; i < config->rx_ring_num; i++) {
 		if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
@@ -2934,7 +2977,8 @@ static void s2io_netpoll(struct net_device *dev)
 /**
  *  rx_intr_handler - Rx interrupt handler
- *  @nic: device private variable.
+ *  @ring_info: per ring structure.
+ *  @budget: budget for napi processing.
  *  Description:
  *  If the interrupt is because of a received frame or if the
  *  receive ring contains fresh as yet un-processed frames,this function is
@@ -2942,15 +2986,15 @@ static void s2io_netpoll(struct net_device *dev)
  *  stopped and sends the skb to the OSM's Rx handler and then increments
  *  the offset.
  *  Return Value:
- *  NONE.
+ *  No. of napi packets processed.
  */
-static void rx_intr_handler(struct ring_info *ring_data)
+static int rx_intr_handler(struct ring_info *ring_data, int budget)
 {
 	int get_block, put_block;
 	struct rx_curr_get_info get_info, put_info;
 	struct RxD_t *rxdp;
 	struct sk_buff *skb;
-	int pkt_cnt = 0;
+	int pkt_cnt = 0, napi_pkts = 0;
 	int i;
 	struct RxD1* rxdp1;
 	struct RxD3* rxdp3;
@@ -2977,7 +3021,7 @@ static void rx_intr_handler(struct ring_info *ring_data)
 			DBG_PRINT(ERR_DBG, "%s: The skb is ",
 				  ring_data->dev->name);
 			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
-			return;
+			return 0;
 		}
 		if (ring_data->rxd_mode == RXD_MODE_1) {
 			rxdp1 = (struct RxD1*)rxdp;
@@ -3014,9 +3058,10 @@ static void rx_intr_handler(struct ring_info *ring_data)
 			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
 		}

-		if(ring_data->nic->config.napi){
-			ring_data->nic->pkts_to_process -= 1;
-			if (!ring_data->nic->pkts_to_process)
+		if (ring_data->nic->config.napi) {
+			budget--;
+			napi_pkts++;
+			if (!budget)
 				break;
 		}
 		pkt_cnt++;
@@ -3034,6 +3079,7 @@ static void rx_intr_handler(struct ring_info *ring_data)
 			}
 		}
 	}
+	return(napi_pkts);
 }

 /**
@@ -3730,14 +3776,19 @@ static void restore_xmsi_data(struct s2io_nic *nic)
 {
 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
 	u64 val64;
-	int i;
+	int i, msix_index;
+
+	if (nic->device_type == XFRAME_I_DEVICE)
+		return;

 	for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
+		msix_index = (i) ? ((i-1) * 8 + 1): 0;
 		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
 		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
-		val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6));
+		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
 		writeq(val64, &bar0->xmsi_access);
-		if (wait_for_msix_trans(nic, i)) {
+		if (wait_for_msix_trans(nic, msix_index)) {
 			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
 			continue;
 		}
@@ -3748,13 +3799,17 @@ static void store_xmsi_data(struct s2io_nic *nic)
 {
 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
 	u64 val64, addr, data;
-	int i;
+	int i, msix_index;
+
+	if (nic->device_type == XFRAME_I_DEVICE)
+		return;

 	/* Store and display */
 	for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
-		val64 = (s2BIT(15) | vBIT(i, 26, 6));
+		msix_index = (i) ? ((i-1) * 8 + 1): 0;
+		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
 		writeq(val64, &bar0->xmsi_access);
-		if (wait_for_msix_trans(nic, i)) {
+		if (wait_for_msix_trans(nic, msix_index)) {
 			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
 			continue;
 		}
@@ -3770,11 +3825,11 @@ static void store_xmsi_data(struct s2io_nic *nic)
 static int s2io_enable_msi_x(struct s2io_nic *nic)
 {
 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
-	u64 tx_mat, rx_mat;
+	u64 rx_mat;
 	u16 msi_control; /* Temp variable */
 	int ret, i, j, msix_indx = 1;

-	nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry),
+	nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
 			       GFP_KERNEL);
 	if (!nic->entries) {
 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
@@ -3783,10 +3838,12 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
 		return -ENOMEM;
 	}
 	nic->mac_control.stats_info->sw_stat.mem_allocated
-		+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+		+= (nic->num_entries * sizeof(struct msix_entry));
+	memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));
+
 	nic->s2io_entries =
-		kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry),
+		kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
 			GFP_KERNEL);
 	if (!nic->s2io_entries) {
 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
@@ -3794,60 +3851,52 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
 		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
 		kfree(nic->entries);
 		nic->mac_control.stats_info->sw_stat.mem_freed
-			+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+			+= (nic->num_entries * sizeof(struct msix_entry));
 		return -ENOMEM;
 	}
 	nic->mac_control.stats_info->sw_stat.mem_allocated
-		+= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
-
-	for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
-		nic->entries[i].entry = i;
-		nic->s2io_entries[i].entry = i;
+		+= (nic->num_entries * sizeof(struct s2io_msix_entry));
+	memset(nic->s2io_entries, 0,
+	       nic->num_entries * sizeof(struct s2io_msix_entry));
+
+	nic->entries[0].entry = 0;
+	nic->s2io_entries[0].entry = 0;
+	nic->s2io_entries[0].in_use = MSIX_FLG;
+	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
+	nic->s2io_entries[0].arg = &nic->mac_control.fifos;
+
+	for (i = 1; i < nic->num_entries; i++) {
+		nic->entries[i].entry = ((i - 1) * 8) + 1;
+		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
 		nic->s2io_entries[i].arg = NULL;
 		nic->s2io_entries[i].in_use = 0;
 	}

-	tx_mat = readq(&bar0->tx_mat0_n[0]);
-	for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
-		tx_mat |= TX_MAT_SET(i, msix_indx);
-		nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
-		nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
-		nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
-	}
-	writeq(tx_mat, &bar0->tx_mat0_n[0]);
-
 	rx_mat = readq(&bar0->rx_mat);
-	for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
+	for (j = 0; j < nic->config.rx_ring_num; j++) {
 		rx_mat |= RX_MAT_SET(j, msix_indx);
-		nic->s2io_entries[msix_indx].arg
-			= &nic->mac_control.rings[j];
-		nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
-		nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
+		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
+		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
+		nic->s2io_entries[j+1].in_use = MSIX_FLG;
+		msix_indx += 8;
 	}
 	writeq(rx_mat, &bar0->rx_mat);
+	readq(&bar0->rx_mat);

-	nic->avail_msix_vectors = 0;
-	ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
+	ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);

 	/* We fail init if error or we get less vectors than min required */
-	if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
-		nic->avail_msix_vectors = ret;
-		ret = pci_enable_msix(nic->pdev, nic->entries, ret);
-	}
 	if (ret) {
 		DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
 		kfree(nic->entries);
 		nic->mac_control.stats_info->sw_stat.mem_freed
-			+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+			+= (nic->num_entries * sizeof(struct msix_entry));
 		kfree(nic->s2io_entries);
 		nic->mac_control.stats_info->sw_stat.mem_freed
-			+= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
+			+= (nic->num_entries * sizeof(struct s2io_msix_entry));
 		nic->entries = NULL;
 		nic->s2io_entries = NULL;
-		nic->avail_msix_vectors = 0;
 		return -ENOMEM;
 	}
-	if (!nic->avail_msix_vectors)
-		nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;

 	/*
 	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
@@ -3919,7 +3968,7 @@ static void remove_msix_isr(struct s2io_nic *sp)
 	int i;
 	u16 msi_control;

-	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
+	for (i = 0; i < sp->num_entries; i++) {
 		if (sp->s2io_entries[i].in_use ==
 		    MSIX_REGISTERED_SUCCESS) {
 			int vector = sp->entries[i].vector;
@@ -3975,29 +4024,6 @@ static int s2io_open(struct net_device *dev)
 	netif_carrier_off(dev);
 	sp->last_link_state = 0;

-	if (sp->config.intr_type == MSI_X) {
-		int ret = s2io_enable_msi_x(sp);
-
-		if (!ret) {
-			ret = s2io_test_msi(sp);
-			/* rollback MSI-X, will re-enable during add_isr() */
-			remove_msix_isr(sp);
-		}
-		if (ret) {
-			DBG_PRINT(ERR_DBG,
-				  "%s: MSI-X requested but failed to enable\n",
-				  dev->name);
-			sp->config.intr_type = INTA;
-		}
-	}
-
-	/* NAPI doesn't work well with MSI(X) */
-	if (sp->config.intr_type != INTA) {
-		if(sp->config.napi)
-			sp->config.napi = 0;
-	}
-
 	/* Initialize H/W and enable interrupts */
 	err = s2io_card_up(sp);
 	if (err) {
@@ -4020,12 +4046,12 @@ hw_init_failed:
 	if (sp->entries) {
 		kfree(sp->entries);
 		sp->mac_control.stats_info->sw_stat.mem_freed
-			+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+			+= (sp->num_entries * sizeof(struct msix_entry));
 	}
 	if (sp->s2io_entries) {
 		kfree(sp->s2io_entries);
 		sp->mac_control.stats_info->sw_stat.mem_freed
-			+= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
+			+= (sp->num_entries * sizeof(struct s2io_msix_entry));
 	}
 	return err;
@@ -4327,40 +4353,64 @@ s2io_alarm_handle(unsigned long data)
 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
 }

-static int s2io_chk_rx_buffers(struct ring_info *ring)
-{
-	if (fill_rx_buffers(ring) == -ENOMEM) {
-		DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
-		DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
-	}
-	return 0;
-}
-
 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
 {
 	struct ring_info *ring = (struct ring_info *)dev_id;
 	struct s2io_nic *sp = ring->nic;
+	struct XENA_dev_config __iomem *bar0 = sp->bar0;
+	struct net_device *dev = sp->dev;

-	if (!is_s2io_card_up(sp))
+	if (unlikely(!is_s2io_card_up(sp)))
 		return IRQ_HANDLED;

-	rx_intr_handler(ring);
-	s2io_chk_rx_buffers(ring);
+	if (sp->config.napi) {
+		u8 *addr = NULL, val8 = 0;
+
+		addr = (u8 *)&bar0->xmsi_mask_reg;
+		addr += (7 - ring->ring_no);
+		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
+		writeb(val8, addr);
+		val8 = readb(addr);
+		netif_rx_schedule(dev, &ring->napi);
+	} else {
+		rx_intr_handler(ring, 0);
+		s2io_chk_rx_buffers(ring);
+	}

 	return IRQ_HANDLED;
 }

 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
 {
-	struct fifo_info *fifo = (struct fifo_info *)dev_id;
-	struct s2io_nic *sp = fifo->nic;
+	int i;
+	struct fifo_info *fifos = (struct fifo_info *)dev_id;
+	struct s2io_nic *sp = fifos->nic;
+	struct XENA_dev_config __iomem *bar0 = sp->bar0;
+	struct config_param *config = &sp->config;
+	u64 reason;

-	if (!is_s2io_card_up(sp))
+	if (unlikely(!is_s2io_card_up(sp)))
+		return IRQ_NONE;
+
+	reason = readq(&bar0->general_int_status);
+	if (unlikely(reason == S2IO_MINUS_ONE))
+		/* Nothing much can be done. Get out */
 		return IRQ_HANDLED;

-	tx_intr_handler(fifo);
+	writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
+
+	if (reason & GEN_INTR_TXTRAFFIC)
+		writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
+
+	for (i = 0; i < config->tx_fifo_num; i++)
+		tx_intr_handler(&fifos[i]);
+
+	writeq(sp->general_int_mask, &bar0->general_int_mask);
+	readl(&bar0->general_int_status);

 	return IRQ_HANDLED;
 }

 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
 {
 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
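Note: with the new vector layout there is one alarm/tx MSI-X vector plus one
vector per rx ring, so s2io_msix_fifo_handle() above masks the device, acks the
tx cause, services every tx fifo, then restores the saved mask. The general
shape of such a shared-vector handler, as a self-contained sketch (demo_* names
and fields are hypothetical, not the driver's registers):

	struct demo_nic {
		int tx_fifo_num;
		unsigned long long int_mask;	/* stands in for the mask register */
		unsigned long long saved_mask;
	};

	static void demo_tx_clean(struct demo_nic *nic, int fifo) { /* reap one fifo */ }

	static int demo_fifo_isr(struct demo_nic *nic, unsigned long long reason)
	{
		int i;

		if (reason == ~0ULL)
			return 0;			/* device likely gone */
		nic->int_mask = ~0ULL;			/* mask everything first */
		for (i = 0; i < nic->tx_fifo_num; i++)
			demo_tx_clean(nic, i);		/* service every tx fifo */
		nic->int_mask = nic->saved_mask;	/* restore so the irq can fire again */
		return 1;
	}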
@@ -4762,14 +4812,10 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
 	if (config->napi) {
 		if (reason & GEN_INTR_RXTRAFFIC) {
-			if (likely(netif_rx_schedule_prep(dev,
-							  &sp->napi))) {
-				__netif_rx_schedule(dev, &sp->napi);
-				writeq(S2IO_MINUS_ONE,
-				       &bar0->rx_traffic_mask);
-			} else
-				writeq(S2IO_MINUS_ONE,
-				       &bar0->rx_traffic_int);
+			netif_rx_schedule(dev, &sp->napi);
+			writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
+			writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
+			readl(&bar0->rx_traffic_int);
 		}
 	} else {
 		/*
@@ -4781,7 +4827,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
 		writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

 		for (i = 0; i < config->rx_ring_num; i++)
-			rx_intr_handler(&mac_control->rings[i]);
+			rx_intr_handler(&mac_control->rings[i], 0);
 	}

 	/*
@@ -6984,62 +7030,62 @@ static int s2io_add_isr(struct s2io_nic * sp)
 	/* After proper initialization of H/W, register ISR */
 	if (sp->config.intr_type == MSI_X) {
-		int i, msix_tx_cnt=0,msix_rx_cnt=0;
-
-		for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
-			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
-				sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
-					dev->name, i);
-				err = request_irq(sp->entries[i].vector,
-					s2io_msix_fifo_handle, 0, sp->desc[i],
-					sp->s2io_entries[i].arg);
-				/* If either data or addr is zero print it */
-				if(!(sp->msix_info[i].addr &&
-				     sp->msix_info[i].data)) {
-					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
-						"Data:0x%llx\n",sp->desc[i],
-						(unsigned long long)
-						sp->msix_info[i].addr,
-						(unsigned long long)
-						sp->msix_info[i].data);
-				} else {
-					msix_tx_cnt++;
-				}
-			} else {
-				sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
-					dev->name, i);
-				err = request_irq(sp->entries[i].vector,
-					s2io_msix_ring_handle, 0, sp->desc[i],
-					sp->s2io_entries[i].arg);
-				/* If either data or addr is zero print it */
-				if(!(sp->msix_info[i].addr &&
-				     sp->msix_info[i].data)) {
-					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
-						"Data:0x%llx\n",sp->desc[i],
-						(unsigned long long)
-						sp->msix_info[i].addr,
-						(unsigned long long)
-						sp->msix_info[i].data);
-				} else {
-					msix_rx_cnt++;
-				}
-			}
-			if (err) {
-				remove_msix_isr(sp);
-				DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
-					  "failed\n", dev->name, i);
-				DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n",
-					  dev->name);
-				sp->config.intr_type = INTA;
-				break;
-			}
-			sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
-		}
-		if (!err) {
-			printk(KERN_INFO "MSI-X-TX %d entries enabled\n",
-			       msix_tx_cnt);
-			printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
-			       msix_rx_cnt);
-		}
+		int i, msix_rx_cnt = 0;
+
+		for (i = 0; i < sp->num_entries; i++) {
+			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
+				if (sp->s2io_entries[i].type ==
+				    MSIX_RING_TYPE) {
+					sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
+						dev->name, i);
+					err = request_irq(sp->entries[i].vector,
+						s2io_msix_ring_handle, 0,
+						sp->desc[i],
+						sp->s2io_entries[i].arg);
+				} else if (sp->s2io_entries[i].type ==
+					   MSIX_ALARM_TYPE) {
+					sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
+						dev->name, i);
+					err = request_irq(sp->entries[i].vector,
+						s2io_msix_fifo_handle, 0,
+						sp->desc[i],
+						sp->s2io_entries[i].arg);
+				}
+				/* if either data or addr is zero print it. */
+				if (!(sp->msix_info[i].addr &&
+				      sp->msix_info[i].data)) {
+					DBG_PRINT(ERR_DBG,
+						"%s @Addr:0x%llx Data:0x%llx\n",
+						sp->desc[i],
+						(unsigned long long)
+						sp->msix_info[i].addr,
+						(unsigned long long)
+						ntohl(sp->msix_info[i].data));
+				} else
+					msix_rx_cnt++;
+				if (err) {
+					remove_msix_isr(sp);
+					DBG_PRINT(ERR_DBG,
+						"%s:MSI-X-%d registration "
+						"failed\n", dev->name, i);
+					DBG_PRINT(ERR_DBG,
+						"%s: Defaulting to INTA\n",
+						dev->name);
+					sp->config.intr_type = INTA;
+					break;
+				}
+				sp->s2io_entries[i].in_use =
+					MSIX_REGISTERED_SUCCESS;
+			}
+		}
+		if (!err) {
+			printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
+			       --msix_rx_cnt);
+			DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
+				  " through alarm vector\n");
+		}
 	}
 	if (sp->config.intr_type == INTA) {
@@ -7080,8 +7126,15 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
 	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

 	/* Disable napi */
-	if (config->napi)
-		napi_disable(&sp->napi);
+	if (sp->config.napi) {
+		int off = 0;
+		if (config->intr_type == MSI_X) {
+			for (; off < sp->config.rx_ring_num; off++)
+				napi_disable(&sp->mac_control.rings[off].napi);
+		}
+		else
+			napi_disable(&sp->napi);
+	}

 	/* disable Tx and Rx traffic on the NIC */
 	if (do_io)
@@ -7173,8 +7226,15 @@ static int s2io_card_up(struct s2io_nic * sp)
 	}

 	/* Initialise napi */
-	if (config->napi)
-		napi_enable(&sp->napi);
+	if (config->napi) {
+		int i;
+		if (config->intr_type == MSI_X) {
+			for (i = 0; i < sp->config.rx_ring_num; i++)
+				napi_enable(&sp->mac_control.rings[i].napi);
+		} else {
+			napi_enable(&sp->napi);
+		}
+	}

 	/* Maintain the state prior to the open */
 	if (sp->promisc_flg)
@@ -7217,7 +7277,7 @@ static int s2io_card_up(struct s2io_nic * sp)
 	/* Enable select interrupts */
 	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
 	if (sp->config.intr_type != INTA)
-		en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
+		en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS);
 	else {
 		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
 		interruptible |= TX_PIC_INTR;
@@ -7615,9 +7675,6 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
 		rx_ring_num = MAX_RX_RINGS;
 	}

-	if (*dev_intr_type != INTA)
-		napi = 0;
-
 	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
 		DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
 			  "Defaulting to INTA\n");
@@ -7918,8 +7975,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 	 * will use eth_mac_addr() for  dev->set_mac_address
 	 * mac address will be set every time dev->open() is called
 	 */
-	netif_napi_add(dev, &sp->napi, s2io_poll, 32);
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	dev->poll_controller = s2io_netpoll;
 #endif
@@ -7963,6 +8018,32 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 		}
 	}

+	if (sp->config.intr_type == MSI_X) {
+		sp->num_entries = config->rx_ring_num + 1;
+		ret = s2io_enable_msi_x(sp);
+
+		if (!ret) {
+			ret = s2io_test_msi(sp);
+			/* rollback MSI-X, will re-enable during add_isr() */
+			remove_msix_isr(sp);
+		}
+		if (ret) {
+			DBG_PRINT(ERR_DBG,
+				  "%s: MSI-X requested but failed to enable\n",
+				  dev->name);
+			sp->config.intr_type = INTA;
+		}
+	}
+
+	if (config->intr_type == MSI_X) {
+		for (i = 0; i < config->rx_ring_num ; i++)
+			netif_napi_add(dev, &mac_control->rings[i].napi,
+				       s2io_poll_msix, 64);
+	} else {
+		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
+	}
+
 	/* Not needed for Herc */
 	if (sp->device_type & XFRAME_I_DEVICE) {
 		/*
@ -8013,6 +8094,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
/* store mac addresses from CAM to s2io_nic structure */ /* store mac addresses from CAM to s2io_nic structure */
do_s2io_store_unicast_mc(sp); do_s2io_store_unicast_mc(sp);
/* Configure MSIX vector for number of rings configured plus one */
if ((sp->device_type == XFRAME_II_DEVICE) &&
(config->intr_type == MSI_X))
sp->num_entries = config->rx_ring_num + 1;
/* Store the values of the MSIX table in the s2io_nic structure */ /* Store the values of the MSIX table in the s2io_nic structure */
store_xmsi_data(sp); store_xmsi_data(sp);
/* reset Nic and bring it to known state */ /* reset Nic and bring it to known state */
@ -8078,8 +8164,14 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
break; break;
} }
if (napi) switch (sp->config.napi) {
case 0:
DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
break;
case 1:
DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
break;
}
DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
sp->config.tx_fifo_num); sp->config.tx_fifo_num);
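In the MSI-X branch above, netif_napi_add() is called once per RX ring; embedding the napi_struct in the ring means each vector's poll routine can find its ring without any shared state. A self-contained sketch of that layout (driver name "foo" and all identifiers hypothetical; the weight of 64 matches the diff):

#include <linux/kernel.h>
#include <linux/netdevice.h>

struct foo_rx_queue {
	struct napi_struct napi;	/* one NAPI context per queue */
	/* ... ring state ... */
};

struct foo_priv {
	struct foo_rx_queue rxq[4];
};

static int foo_poll(struct napi_struct *napi, int budget);

static void foo_register_napi(struct net_device *dev)
{
	struct foo_priv *p = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(p->rxq); i++)
		netif_napi_add(dev, &p->rxq[i].napi, foo_poll, 64);
}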

View file

@ -706,7 +706,7 @@ struct ring_info {
/* per-ring buffer counter */ /* per-ring buffer counter */
u32 rx_bufs_left; u32 rx_bufs_left;
#define MAX_LRO_SESSIONS 32 #define MAX_LRO_SESSIONS 32
struct lro lro0_n[MAX_LRO_SESSIONS]; struct lro lro0_n[MAX_LRO_SESSIONS];
u8 lro; u8 lro;
@ -725,6 +725,11 @@ struct ring_info {
/* copy of sp->pdev pointer */ /* copy of sp->pdev pointer */
struct pci_dev *pdev; struct pci_dev *pdev;
/* Per ring napi struct */
struct napi_struct napi;
unsigned long interrupt_count;
/* /*
* Place holders for the virtual and physical addresses of * Place holders for the virtual and physical addresses of
* all the Rx Blocks * all the Rx Blocks
@ -841,7 +846,7 @@ struct usr_addr {
* Structure to keep track of the MSI-X vectors and the corresponding * Structure to keep track of the MSI-X vectors and the corresponding
* argument registered against each vector * argument registered against each vector
*/ */
#define MAX_REQUESTED_MSI_X 17 #define MAX_REQUESTED_MSI_X 9
struct s2io_msix_entry struct s2io_msix_entry
{ {
u16 vector; u16 vector;
@ -849,8 +854,8 @@ struct s2io_msix_entry
void *arg; void *arg;
u8 type; u8 type;
#define MSIX_FIFO_TYPE 1 #define MSIX_ALARM_TYPE 1
#define MSIX_RING_TYPE 2 #define MSIX_RING_TYPE 2
u8 in_use; u8 in_use;
#define MSIX_REGISTERED_SUCCESS 0xAA #define MSIX_REGISTERED_SUCCESS 0xAA
@ -877,7 +882,6 @@ struct s2io_nic {
*/ */
int pkts_to_process; int pkts_to_process;
struct net_device *dev; struct net_device *dev;
struct napi_struct napi;
struct mac_info mac_control; struct mac_info mac_control;
struct config_param config; struct config_param config;
struct pci_dev *pdev; struct pci_dev *pdev;
@ -948,6 +952,7 @@ struct s2io_nic {
*/ */
u8 other_fifo_idx; u8 other_fifo_idx;
struct napi_struct napi;
/* after blink, the adapter must be restored with original /* after blink, the adapter must be restored with original
* values. * values.
*/ */
@ -962,6 +967,7 @@ struct s2io_nic {
unsigned long long start_time; unsigned long long start_time;
struct vlan_group *vlgrp; struct vlan_group *vlgrp;
#define MSIX_FLG 0xA5 #define MSIX_FLG 0xA5
int num_entries;
struct msix_entry *entries; struct msix_entry *entries;
int msi_detected; int msi_detected;
wait_queue_head_t msi_wait; wait_queue_head_t msi_wait;
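MAX_REQUESTED_MSI_X dropping from 17 to 9 matches the new vector layout: one vector per RX ring (up to 8) plus a single alarm/TX vector, with the actual count kept in the new num_entries field. A hedged condensation of the allocation that layout implies (helper name and error handling illustrative):

#include <linux/pci.h>
#include <linux/slab.h>

static int foo_alloc_msix(struct s2io_nic *sp)
{
	int i;

	sp->num_entries = sp->config.rx_ring_num + 1;	/* rings + alarm */
	sp->entries = kcalloc(sp->num_entries,
			      sizeof(struct msix_entry), GFP_KERNEL);
	if (!sp->entries)
		return -ENOMEM;
	for (i = 0; i < sp->num_entries; i++)
		sp->entries[i].entry = i;
	return pci_enable_msix(sp->pdev, sp->entries, sp->num_entries);
}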
@ -982,6 +988,7 @@ struct s2io_nic {
u16 lro_max_aggr_per_sess; u16 lro_max_aggr_per_sess;
volatile unsigned long state; volatile unsigned long state;
u64 general_int_mask; u64 general_int_mask;
#define VPD_STRING_LEN 80 #define VPD_STRING_LEN 80
u8 product_name[VPD_STRING_LEN]; u8 product_name[VPD_STRING_LEN];
u8 serial_num[VPD_STRING_LEN]; u8 serial_num[VPD_STRING_LEN];
@ -1103,7 +1110,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev);
static int init_shared_mem(struct s2io_nic *sp); static int init_shared_mem(struct s2io_nic *sp);
static void free_shared_mem(struct s2io_nic *sp); static void free_shared_mem(struct s2io_nic *sp);
static int init_nic(struct s2io_nic *nic); static int init_nic(struct s2io_nic *nic);
static void rx_intr_handler(struct ring_info *ring_data); static int rx_intr_handler(struct ring_info *ring_data, int budget);
static void tx_intr_handler(struct fifo_info *fifo_data); static void tx_intr_handler(struct fifo_info *fifo_data);
static void s2io_handle_errors(void * dev_id); static void s2io_handle_errors(void * dev_id);
@ -1114,7 +1121,8 @@ static void s2io_set_multicast(struct net_device *dev);
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
static void s2io_link(struct s2io_nic * sp, int link); static void s2io_link(struct s2io_nic * sp, int link);
static void s2io_reset(struct s2io_nic * sp); static void s2io_reset(struct s2io_nic * sp);
static int s2io_poll(struct napi_struct *napi, int budget); static int s2io_poll_msix(struct napi_struct *napi, int budget);
static int s2io_poll_inta(struct napi_struct *napi, int budget);
static void s2io_init_pci(struct s2io_nic * sp); static void s2io_init_pci(struct s2io_nic * sp);
static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr); static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr);
static void s2io_alarm_handle(unsigned long data); static void s2io_alarm_handle(unsigned long data);
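The prototype changes above — rx_intr_handler() gaining a budget and returning a count, and s2io_poll splitting into MSI-X and INTA variants — follow the standard NAPI contract. A sketch of what the per-ring variant plausibly looks like; napi_complete() is the modern spelling, kernels of this vintage used netif_rx_complete():

#include <linux/kernel.h>
#include <linux/netdevice.h>

static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
	struct ring_info *ring = container_of(napi, struct ring_info, napi);
	int pkts_done;

	pkts_done = rx_intr_handler(ring, budget);

	if (pkts_done < budget) {
		/* Ring drained: leave polled mode and let the device
		 * raise interrupts for this vector again. */
		napi_complete(napi);
		/* ... re-enable this ring's RX interrupt here ... */
	}
	return pkts_done;
}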

View file

@ -179,8 +179,7 @@ enum sbmac_state {
#define SBMAC_MAX_TXDESCR 256 #define SBMAC_MAX_TXDESCR 256
#define SBMAC_MAX_RXDESCR 256 #define SBMAC_MAX_RXDESCR 256
#define ETHER_ALIGN 2 #define ETHER_ADDR_LEN 6
#define ETHER_ADDR_LEN 6
#define ENET_PACKET_SIZE 1518 #define ENET_PACKET_SIZE 1518
/*#define ENET_PACKET_SIZE 9216 */ /*#define ENET_PACKET_SIZE 9216 */
@ -262,8 +261,6 @@ struct sbmac_softc {
spinlock_t sbm_lock; /* spin lock */ spinlock_t sbm_lock; /* spin lock */
int sbm_devflags; /* current device flags */ int sbm_devflags; /* current device flags */
int sbm_buffersize;
/* /*
* Controller-specific things * Controller-specific things
*/ */
@ -305,10 +302,11 @@ struct sbmac_softc {
static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
int txrx, int maxdescr); int txrx, int maxdescr);
static void sbdma_channel_start(struct sbmacdma *d, int rxtx); static void sbdma_channel_start(struct sbmacdma *d, int rxtx);
static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *m); static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
struct sk_buff *m);
static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m); static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m);
static void sbdma_emptyring(struct sbmacdma *d); static void sbdma_emptyring(struct sbmacdma *d);
static void sbdma_fillring(struct sbmacdma *d); static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d);
static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
int work_to_do, int poll); int work_to_do, int poll);
static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
@ -777,16 +775,13 @@ static void sbdma_channel_stop(struct sbmacdma *d)
d->sbdma_remptr = NULL; d->sbdma_remptr = NULL;
} }
static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) static inline void sbdma_align_skb(struct sk_buff *skb,
unsigned int power2, unsigned int offset)
{ {
unsigned long addr; unsigned char *addr = skb->data;
unsigned long newaddr; unsigned char *newaddr = PTR_ALIGN(addr, power2);
addr = (unsigned long) skb->data; skb_reserve(skb, newaddr - addr + offset);
newaddr = (addr + power2 - 1) & ~(power2 - 1);
skb_reserve(skb,newaddr-addr+offset);
} }
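The rewrite above replaces open-coded round-up arithmetic with PTR_ALIGN(), which does the same thing while keeping pointer types intact. A sketch of the allocation-plus-alignment sequence (identifiers illustrative; the 2 * SMP_CACHE_BYTES of headroom matches what the later hunk reserves so the rounded-up pointer still fits):

#include <linux/cache.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

static struct sk_buff *alloc_aligned_rx_skb(struct net_device *dev,
					    unsigned int len)
{
	struct sk_buff *skb;

	/* Extra headroom so data can be rounded up to a cache line
	 * and still leave NET_IP_ALIGN bytes before the frame. */
	skb = netdev_alloc_skb(dev, len + 2 * SMP_CACHE_BYTES + NET_IP_ALIGN);
	if (!skb)
		return NULL;

	skb_reserve(skb, PTR_ALIGN(skb->data, SMP_CACHE_BYTES)
			 - skb->data + NET_IP_ALIGN);
	return skb;
}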
@ -797,7 +792,8 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
* this queues a buffer for inbound packets. * this queues a buffer for inbound packets.
* *
* Input parameters: * Input parameters:
* d - DMA channel descriptor * sc - softc structure
* d - DMA channel descriptor
* sb - sk_buff to add, or NULL if we should allocate one * sb - sk_buff to add, or NULL if we should allocate one
* *
* Return value: * Return value:
@ -806,8 +802,10 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
********************************************************************* */ ********************************************************************* */
static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb) static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
struct sk_buff *sb)
{ {
struct net_device *dev = sc->sbm_dev;
struct sbdmadscr *dsc; struct sbdmadscr *dsc;
struct sbdmadscr *nextdsc; struct sbdmadscr *nextdsc;
struct sk_buff *sb_new = NULL; struct sk_buff *sb_new = NULL;
@ -848,14 +846,16 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb)
*/ */
if (sb == NULL) { if (sb == NULL) {
sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN); sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
SMP_CACHE_BYTES * 2 +
NET_IP_ALIGN);
if (sb_new == NULL) { if (sb_new == NULL) {
pr_info("%s: sk_buff allocation failed\n", pr_info("%s: sk_buff allocation failed\n",
d->sbdma_eth->sbm_dev->name); d->sbdma_eth->sbm_dev->name);
return -ENOBUFS; return -ENOBUFS;
} }
sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN); sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
} }
else { else {
sb_new = sb; sb_new = sb;
@ -874,10 +874,10 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb)
* Do not interrupt per DMA transfer. * Do not interrupt per DMA transfer.
*/ */
dsc->dscr_a = virt_to_phys(sb_new->data) | dsc->dscr_a = virt_to_phys(sb_new->data) |
V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0; V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0;
#else #else
dsc->dscr_a = virt_to_phys(sb_new->data) | dsc->dscr_a = virt_to_phys(sb_new->data) |
V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) |
M_DMA_DSCRA_INTERRUPT; M_DMA_DSCRA_INTERRUPT;
#endif #endif
@ -1032,18 +1032,19 @@ static void sbdma_emptyring(struct sbmacdma *d)
* with sk_buffs * with sk_buffs
* *
* Input parameters: * Input parameters:
* d - DMA channel * sc - softc structure
* d - DMA channel
* *
* Return value: * Return value:
* nothing * nothing
********************************************************************* */ ********************************************************************* */
static void sbdma_fillring(struct sbmacdma *d) static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d)
{ {
int idx; int idx;
for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) { for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) {
if (sbdma_add_rcvbuffer(d,NULL) != 0) if (sbdma_add_rcvbuffer(sc, d, NULL) != 0)
break; break;
} }
} }
@ -1159,10 +1160,11 @@ again:
* packet and put it right back on the receive ring. * packet and put it right back on the receive ring.
*/ */
if (unlikely (sbdma_add_rcvbuffer(d,NULL) == if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) ==
-ENOBUFS)) { -ENOBUFS)) {
dev->stats.rx_dropped++; dev->stats.rx_dropped++;
sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */ /* Re-add old buffer */
sbdma_add_rcvbuffer(sc, d, sb);
/* No point in continuing at the moment */ /* No point in continuing at the moment */
printk(KERN_ERR "dropped packet (1)\n"); printk(KERN_ERR "dropped packet (1)\n");
d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
@ -1212,7 +1214,7 @@ again:
* put it back on the receive ring. * put it back on the receive ring.
*/ */
dev->stats.rx_errors++; dev->stats.rx_errors++;
sbdma_add_rcvbuffer(d,sb); sbdma_add_rcvbuffer(sc, d, sb);
} }
@ -1570,7 +1572,7 @@ static void sbmac_channel_start(struct sbmac_softc *s)
* Fill the receive ring * Fill the receive ring
*/ */
sbdma_fillring(&(s->sbm_rxdma)); sbdma_fillring(s, &(s->sbm_rxdma));
/* /*
* Turn on the rest of the bits in the enable register * Turn on the rest of the bits in the enable register
@ -2312,13 +2314,6 @@ static int sbmac_init(struct platform_device *pldev, long long base)
dev->dev_addr[i] = eaddr[i]; dev->dev_addr[i] = eaddr[i];
} }
/*
* Init packet size
*/
sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN;
/* /*
* Initialize context (get pointers to registers and stuff), then * Initialize context (get pointers to registers and stuff), then
* allocate the memory for the descriptor tables. * allocate the memory for the descriptor tables.

View file

@ -953,9 +953,6 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned entry; unsigned entry;
u32 tx_status; u32 tx_status;
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
if (unlikely(skb->len > TX_BUF_SIZE)) { if (unlikely(skb->len > TX_BUF_SIZE)) {
dev->stats.tx_dropped++; dev->stats.tx_dropped++;
goto out; goto out;
@ -975,6 +972,11 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);
len = skb->len; len = skb->len;
if (unlikely(len < ETH_ZLEN)) {
memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
0, ETH_ZLEN - len);
len = ETH_ZLEN;
}
wmb(); wmb();
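The sc92031 change above drops skb_padto() — which may have to reallocate the skb and can fail — in favour of zero-filling the tail of the private DMA buffer the frame is copied into anyway. A condensed sketch, where tx_buf stands for the per-descriptor bounce buffer:

#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static unsigned int copy_and_pad_tx(u8 *tx_buf, const struct sk_buff *skb)
{
	unsigned int len = skb->len;

	skb_copy_and_csum_dev(skb, tx_buf);
	if (len < ETH_ZLEN) {		/* 60-byte minimum frame */
		memset(tx_buf + len, 0, ETH_ZLEN - len);
		len = ETH_ZLEN;
	}
	return len;
}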

View file

@ -483,7 +483,7 @@ typedef union efx_oword {
#endif #endif
#define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \ #define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
if (FALCON_REV(efx) >= FALCON_REV_B0) { \ if (falcon_rev(efx) >= FALCON_REV_B0) { \
EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \ EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
} else { \ } else { \
EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \ EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
@ -491,7 +491,7 @@ typedef union efx_oword {
} while (0) } while (0)
#define EFX_QWORD_FIELD_VER(efx, qword, field) \ #define EFX_QWORD_FIELD_VER(efx, qword, field) \
(FALCON_REV(efx) >= FALCON_REV_B0 ? \ (falcon_rev(efx) >= FALCON_REV_B0 ? \
EFX_QWORD_FIELD((qword), field##_B0) : \ EFX_QWORD_FIELD((qword), field##_B0) : \
EFX_QWORD_FIELD((qword), field##_A1)) EFX_QWORD_FIELD((qword), field##_A1))
@ -501,8 +501,5 @@ typedef union efx_oword {
#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t)) #define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
#define EFX_DMA_TYPE_WIDTH(width) \ #define EFX_DMA_TYPE_WIDTH(width) \
(((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
#define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \
~((u64) 0) : ~((u32) 0))
#define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK)
#endif /* EFX_BITFIELD_H */ #endif /* EFX_BITFIELD_H */

View file

@ -27,10 +27,8 @@ static void blink_led_timer(unsigned long context)
struct efx_blinker *bl = &efx->board_info.blinker; struct efx_blinker *bl = &efx->board_info.blinker;
efx->board_info.set_fault_led(efx, bl->state); efx->board_info.set_fault_led(efx, bl->state);
bl->state = !bl->state; bl->state = !bl->state;
if (bl->resubmit) { if (bl->resubmit)
bl->timer.expires = jiffies + BLINK_INTERVAL; mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
add_timer(&bl->timer);
}
} }
static void board_blink(struct efx_nic *efx, int blink) static void board_blink(struct efx_nic *efx, int blink)
@ -44,8 +42,7 @@ static void board_blink(struct efx_nic *efx, int blink)
blinker->state = 0; blinker->state = 0;
setup_timer(&blinker->timer, blink_led_timer, setup_timer(&blinker->timer, blink_led_timer,
(unsigned long)efx); (unsigned long)efx);
blinker->timer.expires = jiffies + BLINK_INTERVAL; mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL);
add_timer(&blinker->timer);
} else { } else {
blinker->resubmit = 0; blinker->resubmit = 0;
if (blinker->timer.function) if (blinker->timer.function)
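mod_timer(), as adopted in the two hunks above, folds "set expires, then add_timer()" into one call and is safe even if the timer is already pending — exactly what a self-rearming callback wants. A minimal sketch, assuming the timer was set up with setup_timer(&t, blink_cb, (unsigned long)&t):

#include <linux/jiffies.h>
#include <linux/timer.h>

static void blink_cb(unsigned long data)
{
	struct timer_list *t = (struct timer_list *)data;

	/* ... toggle the LED state here ... */
	mod_timer(t, jiffies + HZ / 2);		/* re-fire in 500 ms */
}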

View file

@ -199,11 +199,12 @@ static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
*/ */
static inline void efx_channel_processed(struct efx_channel *channel) static inline void efx_channel_processed(struct efx_channel *channel)
{ {
/* Write to EVQ_RPTR_REG. If a new event arrived in a race /* The interrupt handler for this channel may set work_pending
* with finishing processing, a new interrupt will be raised. * as soon as we acknowledge the events we've seen. Make sure
*/ * it's cleared before then. */
channel->work_pending = 0; channel->work_pending = 0;
smp_wmb(); /* Ensure channel updated before any new interrupt. */ smp_wmb();
falcon_eventq_read_ack(channel); falcon_eventq_read_ack(channel);
} }
@ -265,7 +266,7 @@ void efx_process_channel_now(struct efx_channel *channel)
napi_disable(&channel->napi_str); napi_disable(&channel->napi_str);
/* Poll the channel */ /* Poll the channel */
(void) efx_process_channel(channel, efx->type->evq_size); efx_process_channel(channel, efx->type->evq_size);
/* Ack the eventq. This may cause an interrupt to be generated /* Ack the eventq. This may cause an interrupt to be generated
* when they are reenabled */ * when they are reenabled */
@ -317,26 +318,6 @@ static void efx_remove_eventq(struct efx_channel *channel)
* *
*************************************************************************/ *************************************************************************/
/* Setup per-NIC RX buffer parameters.
* Calculate the rx buffer allocation parameters required to support
* the current MTU, including padding for header alignment and overruns.
*/
static void efx_calc_rx_buffer_params(struct efx_nic *efx)
{
unsigned int order, len;
len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
/* Calculate page-order */
for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
;
efx->rx_buffer_len = len;
efx->rx_buffer_order = order;
}
static int efx_probe_channel(struct efx_channel *channel) static int efx_probe_channel(struct efx_channel *channel)
{ {
struct efx_tx_queue *tx_queue; struct efx_tx_queue *tx_queue;
@ -387,7 +368,14 @@ static int efx_init_channels(struct efx_nic *efx)
struct efx_channel *channel; struct efx_channel *channel;
int rc = 0; int rc = 0;
efx_calc_rx_buffer_params(efx); /* Calculate the rx buffer allocation parameters required to
* support the current MTU, including padding for header
* alignment and overruns.
*/
efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
efx->rx_buffer_order = get_order(efx->rx_buffer_len);
/* Initialise the channels */ /* Initialise the channels */
efx_for_each_channel(channel, efx) { efx_for_each_channel(channel, efx) {
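Folding the deleted efx_calc_rx_buffer_params() into efx_init_channels() also swaps its open-coded order loop for get_order(), which returns the smallest n such that (PAGE_SIZE << n) >= len. A sketch of the arithmetic (names and padding value illustrative):

#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <asm/page.h>

static struct page *alloc_rx_block(unsigned int frame_len,
				   unsigned int padding)
{
	unsigned int len = NET_IP_ALIGN + frame_len + padding;

	/* A 1518-byte frame still fits order 0 on 4 KiB pages. */
	return alloc_pages(GFP_ATOMIC, get_order(len));
}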
@ -440,9 +428,12 @@ static void efx_start_channel(struct efx_channel *channel)
netif_napi_add(channel->napi_dev, &channel->napi_str, netif_napi_add(channel->napi_dev, &channel->napi_str,
efx_poll, napi_weight); efx_poll, napi_weight);
/* The interrupt handler for this channel may set work_pending
* as soon as we enable it. Make sure it's cleared before
* then. Similarly, make sure it sees the enabled flag set. */
channel->work_pending = 0; channel->work_pending = 0;
channel->enabled = 1; channel->enabled = 1;
smp_wmb(); /* ensure channel updated before first interrupt */ smp_wmb();
napi_enable(&channel->napi_str); napi_enable(&channel->napi_str);
@ -704,7 +695,7 @@ static void efx_stop_port(struct efx_nic *efx)
mutex_unlock(&efx->mac_lock); mutex_unlock(&efx->mac_lock);
/* Serialise against efx_set_multicast_list() */ /* Serialise against efx_set_multicast_list() */
if (NET_DEV_REGISTERED(efx)) { if (efx_dev_registered(efx)) {
netif_tx_lock_bh(efx->net_dev); netif_tx_lock_bh(efx->net_dev);
netif_tx_unlock_bh(efx->net_dev); netif_tx_unlock_bh(efx->net_dev);
} }
@ -791,22 +782,23 @@ static int efx_init_io(struct efx_nic *efx)
efx->membase = ioremap_nocache(efx->membase_phys, efx->membase = ioremap_nocache(efx->membase_phys,
efx->type->mem_map_size); efx->type->mem_map_size);
if (!efx->membase) { if (!efx->membase) {
EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n", EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
efx->type->mem_bar, efx->membase_phys, efx->type->mem_bar,
(unsigned long long)efx->membase_phys,
efx->type->mem_map_size); efx->type->mem_map_size);
rc = -ENOMEM; rc = -ENOMEM;
goto fail4; goto fail4;
} }
EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n", EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size, efx->type->mem_bar, (unsigned long long)efx->membase_phys,
efx->membase); efx->type->mem_map_size, efx->membase);
return 0; return 0;
fail4: fail4:
release_mem_region(efx->membase_phys, efx->type->mem_map_size); release_mem_region(efx->membase_phys, efx->type->mem_map_size);
fail3: fail3:
efx->membase_phys = 0UL; efx->membase_phys = 0;
fail2: fail2:
pci_disable_device(efx->pci_dev); pci_disable_device(efx->pci_dev);
fail1: fail1:
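membase_phys becoming resource_size_t ripples into the log statements above because the type's width varies with the platform (32 or 64 bits, per CONFIG_RESOURCES_64BIT), so the portable printk recipe is an explicit cast plus %llx. For instance, in a hypothetical helper:

#include <linux/kernel.h>
#include <linux/pci.h>

static void report_bar(struct pci_dev *pdev, int bar)
{
	resource_size_t phys = pci_resource_start(pdev, bar);

	printk(KERN_INFO "memory BAR %d at %llx\n", bar,
	       (unsigned long long)phys);
}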
@ -824,7 +816,7 @@ static void efx_fini_io(struct efx_nic *efx)
if (efx->membase_phys) { if (efx->membase_phys) {
pci_release_region(efx->pci_dev, efx->type->mem_bar); pci_release_region(efx->pci_dev, efx->type->mem_bar);
efx->membase_phys = 0UL; efx->membase_phys = 0;
} }
pci_disable_device(efx->pci_dev); pci_disable_device(efx->pci_dev);
@ -1043,7 +1035,7 @@ static void efx_start_all(struct efx_nic *efx)
return; return;
if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
return; return;
if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev)) if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
return; return;
/* Mark the port as enabled so port reconfigurations can start, then /* Mark the port as enabled so port reconfigurations can start, then
@ -1073,9 +1065,8 @@ static void efx_flush_all(struct efx_nic *efx)
cancel_delayed_work_sync(&efx->monitor_work); cancel_delayed_work_sync(&efx->monitor_work);
/* Ensure that all RX slow refills are complete. */ /* Ensure that all RX slow refills are complete. */
efx_for_each_rx_queue(rx_queue, efx) { efx_for_each_rx_queue(rx_queue, efx)
cancel_delayed_work_sync(&rx_queue->work); cancel_delayed_work_sync(&rx_queue->work);
}
/* Stop scheduled port reconfigurations */ /* Stop scheduled port reconfigurations */
cancel_work_sync(&efx->reconfigure_work); cancel_work_sync(&efx->reconfigure_work);
@ -1101,9 +1092,10 @@ static void efx_stop_all(struct efx_nic *efx)
falcon_disable_interrupts(efx); falcon_disable_interrupts(efx);
if (efx->legacy_irq) if (efx->legacy_irq)
synchronize_irq(efx->legacy_irq); synchronize_irq(efx->legacy_irq);
efx_for_each_channel_with_interrupt(channel, efx) efx_for_each_channel_with_interrupt(channel, efx) {
if (channel->irq) if (channel->irq)
synchronize_irq(channel->irq); synchronize_irq(channel->irq);
}
/* Stop all NAPI processing and synchronous rx refills */ /* Stop all NAPI processing and synchronous rx refills */
efx_for_each_channel(channel, efx) efx_for_each_channel(channel, efx)
@ -1125,7 +1117,7 @@ static void efx_stop_all(struct efx_nic *efx)
/* Stop the kernel transmit interface late, so the watchdog /* Stop the kernel transmit interface late, so the watchdog
* timer isn't ticking over the flush */ * timer isn't ticking over the flush */
efx_stop_queue(efx); efx_stop_queue(efx);
if (NET_DEV_REGISTERED(efx)) { if (efx_dev_registered(efx)) {
netif_tx_lock_bh(efx->net_dev); netif_tx_lock_bh(efx->net_dev);
netif_tx_unlock_bh(efx->net_dev); netif_tx_unlock_bh(efx->net_dev);
} }
@ -1344,13 +1336,17 @@ static int efx_net_stop(struct net_device *net_dev)
return 0; return 0;
} }
/* Context: process, dev_base_lock held, non-blocking. */ /* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev) static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{ {
struct efx_nic *efx = net_dev->priv; struct efx_nic *efx = net_dev->priv;
struct efx_mac_stats *mac_stats = &efx->mac_stats; struct efx_mac_stats *mac_stats = &efx->mac_stats;
struct net_device_stats *stats = &net_dev->stats; struct net_device_stats *stats = &net_dev->stats;
/* Update stats if possible, but do not wait if another thread
* is updating them (or resetting the NIC); slightly stale
* stats are acceptable.
*/
if (!spin_trylock(&efx->stats_lock)) if (!spin_trylock(&efx->stats_lock))
return stats; return stats;
if (efx->state == STATE_RUNNING) { if (efx->state == STATE_RUNNING) {
@ -1494,7 +1490,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
static int efx_netdev_event(struct notifier_block *this, static int efx_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr) unsigned long event, void *ptr)
{ {
struct net_device *net_dev = (struct net_device *)ptr; struct net_device *net_dev = ptr;
if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) { if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
struct efx_nic *efx = net_dev->priv; struct efx_nic *efx = net_dev->priv;
@ -1563,7 +1559,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
efx_for_each_tx_queue(tx_queue, efx) efx_for_each_tx_queue(tx_queue, efx)
efx_release_tx_buffers(tx_queue); efx_release_tx_buffers(tx_queue);
if (NET_DEV_REGISTERED(efx)) { if (efx_dev_registered(efx)) {
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
unregister_netdev(efx->net_dev); unregister_netdev(efx->net_dev);
} }
@ -1688,7 +1684,7 @@ static int efx_reset(struct efx_nic *efx)
if (method == RESET_TYPE_DISABLE) { if (method == RESET_TYPE_DISABLE) {
/* Reinitialise the device anyway so the driver unload sequence /* Reinitialise the device anyway so the driver unload sequence
* can talk to the external SRAM */ * can talk to the external SRAM */
(void) falcon_init_nic(efx); falcon_init_nic(efx);
rc = -EIO; rc = -EIO;
goto fail4; goto fail4;
} }

View file

@ -116,17 +116,8 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
************************************************************************** **************************************************************************
*/ */
/* DMA address mask (up to 46-bit, avoiding compiler warnings) /* DMA address mask */
* #define FALCON_DMA_MASK DMA_BIT_MASK(46)
* Note that it is possible to have a platform with 64-bit longs and
* 32-bit DMA addresses, or vice versa. EFX_DMA_MASK takes care of the
* platform DMA mask.
*/
#if BITS_PER_LONG == 64
#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL)
#else
#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL)
#endif
/* TX DMA length mask (13-bit) */ /* TX DMA length mask (13-bit) */
#define FALCON_TX_DMA_MASK (4096 - 1) #define FALCON_TX_DMA_MASK (4096 - 1)
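DMA_BIT_MASK(46) — ((1ULL << 46) - 1) — replaces the BITS_PER_LONG-conditional EFX_DMA_MASK definitions deleted here and in bitfield.h, on the assumption that dma_addr_t truncation is the DMA API's problem, not the driver's. A sketch of probe-time use under that assumption (helper name hypothetical):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int foo_set_dma_mask(struct pci_dev *pdev)
{
	u64 mask = DMA_BIT_MASK(46);	/* device addressing limit */

	/* Walk the mask down until the platform accepts it. */
	while (mask > 0x7fffffffUL) {
		if (pci_set_dma_mask(pdev, mask) == 0)
			return 0;
		mask >>= 1;
	}
	return -EIO;
}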
@ -145,7 +136,7 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
#define PCI_EXP_LNKSTA_LNK_WID_LBN 4 #define PCI_EXP_LNKSTA_LNK_WID_LBN 4
#define FALCON_IS_DUAL_FUNC(efx) \ #define FALCON_IS_DUAL_FUNC(efx) \
(FALCON_REV(efx) < FALCON_REV_B0) (falcon_rev(efx) < FALCON_REV_B0)
/************************************************************************** /**************************************************************************
* *
@ -465,7 +456,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
TX_DESCQ_TYPE, 0, TX_DESCQ_TYPE, 0,
TX_NON_IP_DROP_DIS_B0, 1); TX_NON_IP_DROP_DIS_B0, 1);
if (FALCON_REV(efx) >= FALCON_REV_B0) { if (falcon_rev(efx) >= FALCON_REV_B0) {
int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM); int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum); EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum); EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
@ -474,7 +465,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
tx_queue->queue); tx_queue->queue);
if (FALCON_REV(efx) < FALCON_REV_B0) { if (falcon_rev(efx) < FALCON_REV_B0) {
efx_oword_t reg; efx_oword_t reg;
BUG_ON(tx_queue->queue >= 128); /* HW limit */ BUG_ON(tx_queue->queue >= 128); /* HW limit */
@ -635,7 +626,7 @@ int falcon_init_rx(struct efx_rx_queue *rx_queue)
efx_oword_t rx_desc_ptr; efx_oword_t rx_desc_ptr;
struct efx_nic *efx = rx_queue->efx; struct efx_nic *efx = rx_queue->efx;
int rc; int rc;
int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0; int is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
int iscsi_digest_en = is_b0; int iscsi_digest_en = is_b0;
EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
@ -822,10 +813,10 @@ static inline void falcon_handle_tx_event(struct efx_channel *channel,
tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
tx_queue = &efx->tx_queue[tx_ev_q_label]; tx_queue = &efx->tx_queue[tx_ev_q_label];
if (NET_DEV_REGISTERED(efx)) if (efx_dev_registered(efx))
netif_tx_lock(efx->net_dev); netif_tx_lock(efx->net_dev);
falcon_notify_tx_desc(tx_queue); falcon_notify_tx_desc(tx_queue);
if (NET_DEV_REGISTERED(efx)) if (efx_dev_registered(efx))
netif_tx_unlock(efx->net_dev); netif_tx_unlock(efx->net_dev);
} else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) && } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
EFX_WORKAROUND_10727(efx)) { EFX_WORKAROUND_10727(efx)) {
@ -884,7 +875,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
RX_EV_TCP_UDP_CHKSUM_ERR); RX_EV_TCP_UDP_CHKSUM_ERR);
rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR); rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ? rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB)); 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);
@ -1065,7 +1056,7 @@ static void falcon_handle_global_event(struct efx_channel *channel,
EFX_QWORD_FIELD(*event, XG_PHY_INTR)) EFX_QWORD_FIELD(*event, XG_PHY_INTR))
is_phy_event = 1; is_phy_event = 1;
if ((FALCON_REV(efx) >= FALCON_REV_B0) && if ((falcon_rev(efx) >= FALCON_REV_B0) &&
EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))
is_phy_event = 1; is_phy_event = 1;
@ -1405,7 +1396,7 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx) static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
{ {
struct falcon_nic_data *nic_data = efx->nic_data; struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; efx_oword_t *int_ker = efx->irq_status.addr;
efx_oword_t fatal_intr; efx_oword_t fatal_intr;
int error, mem_perr; int error, mem_perr;
static int n_int_errors; static int n_int_errors;
@ -1451,8 +1442,8 @@ out:
*/ */
static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
{ {
struct efx_nic *efx = (struct efx_nic *)dev_id; struct efx_nic *efx = dev_id;
efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; efx_oword_t *int_ker = efx->irq_status.addr;
struct efx_channel *channel; struct efx_channel *channel;
efx_dword_t reg; efx_dword_t reg;
u32 queues; u32 queues;
@ -1489,8 +1480,8 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{ {
struct efx_nic *efx = (struct efx_nic *)dev_id; struct efx_nic *efx = dev_id;
efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; efx_oword_t *int_ker = efx->irq_status.addr;
struct efx_channel *channel; struct efx_channel *channel;
int syserr; int syserr;
int queues; int queues;
@ -1542,9 +1533,9 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
*/ */
static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id) static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
{ {
struct efx_channel *channel = (struct efx_channel *)dev_id; struct efx_channel *channel = dev_id;
struct efx_nic *efx = channel->efx; struct efx_nic *efx = channel->efx;
efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; efx_oword_t *int_ker = efx->irq_status.addr;
int syserr; int syserr;
efx->last_irq_cpu = raw_smp_processor_id(); efx->last_irq_cpu = raw_smp_processor_id();
@ -1572,7 +1563,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx)
unsigned long offset; unsigned long offset;
efx_dword_t dword; efx_dword_t dword;
if (FALCON_REV(efx) < FALCON_REV_B0) if (falcon_rev(efx) < FALCON_REV_B0)
return; return;
for (offset = RX_RSS_INDIR_TBL_B0; for (offset = RX_RSS_INDIR_TBL_B0;
@ -1595,7 +1586,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
if (!EFX_INT_MODE_USE_MSI(efx)) { if (!EFX_INT_MODE_USE_MSI(efx)) {
irq_handler_t handler; irq_handler_t handler;
if (FALCON_REV(efx) >= FALCON_REV_B0) if (falcon_rev(efx) >= FALCON_REV_B0)
handler = falcon_legacy_interrupt_b0; handler = falcon_legacy_interrupt_b0;
else else
handler = falcon_legacy_interrupt_a1; handler = falcon_legacy_interrupt_a1;
@ -1636,12 +1627,13 @@ void falcon_fini_interrupt(struct efx_nic *efx)
efx_oword_t reg; efx_oword_t reg;
/* Disable MSI/MSI-X interrupts */ /* Disable MSI/MSI-X interrupts */
efx_for_each_channel_with_interrupt(channel, efx) efx_for_each_channel_with_interrupt(channel, efx) {
if (channel->irq) if (channel->irq)
free_irq(channel->irq, channel); free_irq(channel->irq, channel);
}
/* ACK legacy interrupt */ /* ACK legacy interrupt */
if (FALCON_REV(efx) >= FALCON_REV_B0) if (falcon_rev(efx) >= FALCON_REV_B0)
falcon_read(efx, &reg, INT_ISR0_B0); falcon_read(efx, &reg, INT_ISR0_B0);
else else
falcon_irq_ack_a1(efx); falcon_irq_ack_a1(efx);
@ -1732,7 +1724,7 @@ void falcon_drain_tx_fifo(struct efx_nic *efx)
efx_oword_t temp; efx_oword_t temp;
int count; int count;
if ((FALCON_REV(efx) < FALCON_REV_B0) || if ((falcon_rev(efx) < FALCON_REV_B0) ||
(efx->loopback_mode != LOOPBACK_NONE)) (efx->loopback_mode != LOOPBACK_NONE))
return; return;
@ -1785,7 +1777,7 @@ void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
{ {
efx_oword_t temp; efx_oword_t temp;
if (FALCON_REV(efx) < FALCON_REV_B0) if (falcon_rev(efx) < FALCON_REV_B0)
return; return;
/* Isolate the MAC -> RX */ /* Isolate the MAC -> RX */
@ -1823,7 +1815,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
MAC_SPEED, link_speed); MAC_SPEED, link_speed);
/* On B0, MAC backpressure can be disabled and packets get /* On B0, MAC backpressure can be disabled and packets get
* discarded. */ * discarded. */
if (FALCON_REV(efx) >= FALCON_REV_B0) { if (falcon_rev(efx) >= FALCON_REV_B0) {
EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
!efx->link_up); !efx->link_up);
} }
@ -1841,7 +1833,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
/* Unisolate the MAC -> RX */ /* Unisolate the MAC -> RX */
if (FALCON_REV(efx) >= FALCON_REV_B0) if (falcon_rev(efx) >= FALCON_REV_B0)
EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
falcon_write(efx, &reg, RX_CFG_REG_KER); falcon_write(efx, &reg, RX_CFG_REG_KER);
} }
@ -1856,7 +1848,7 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
return 0; return 0;
/* Statistics fetch will fail if the MAC is in TX drain */ /* Statistics fetch will fail if the MAC is in TX drain */
if (FALCON_REV(efx) >= FALCON_REV_B0) { if (falcon_rev(efx) >= FALCON_REV_B0) {
efx_oword_t temp; efx_oword_t temp;
falcon_read(efx, &temp, MAC0_CTRL_REG_KER); falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
@ -1940,7 +1932,7 @@ static int falcon_gmii_wait(struct efx_nic *efx)
static void falcon_mdio_write(struct net_device *net_dev, int phy_id, static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
int addr, int value) int addr, int value)
{ {
struct efx_nic *efx = (struct efx_nic *)net_dev->priv; struct efx_nic *efx = net_dev->priv;
unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK; unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
efx_oword_t reg; efx_oword_t reg;
@ -2008,7 +2000,7 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
* could be read, -1 will be returned. */ * could be read, -1 will be returned. */
static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr) static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
{ {
struct efx_nic *efx = (struct efx_nic *)net_dev->priv; struct efx_nic *efx = net_dev->priv;
unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK; unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
efx_oword_t reg; efx_oword_t reg;
int value = -1; int value = -1;
@ -2113,7 +2105,7 @@ int falcon_probe_port(struct efx_nic *efx)
falcon_init_mdio(&efx->mii); falcon_init_mdio(&efx->mii);
/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
if (FALCON_REV(efx) >= FALCON_REV_B0) if (falcon_rev(efx) >= FALCON_REV_B0)
efx->flow_control = EFX_FC_RX | EFX_FC_TX; efx->flow_control = EFX_FC_RX | EFX_FC_TX;
else else
efx->flow_control = EFX_FC_RX; efx->flow_control = EFX_FC_RX;
@ -2373,7 +2365,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
return -ENODEV; return -ENODEV;
} }
switch (FALCON_REV(efx)) { switch (falcon_rev(efx)) {
case FALCON_REV_A0: case FALCON_REV_A0:
case 0xff: case 0xff:
EFX_ERR(efx, "Falcon rev A0 not supported\n"); EFX_ERR(efx, "Falcon rev A0 not supported\n");
@ -2399,7 +2391,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
break; break;
default: default:
EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx)); EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
return -ENODEV; return -ENODEV;
} }
@ -2419,7 +2411,7 @@ int falcon_probe_nic(struct efx_nic *efx)
/* Allocate storage for hardware specific data */ /* Allocate storage for hardware specific data */
nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
efx->nic_data = (void *) nic_data; efx->nic_data = nic_data;
/* Determine number of ports etc. */ /* Determine number of ports etc. */
rc = falcon_probe_nic_variant(efx); rc = falcon_probe_nic_variant(efx);
@ -2489,13 +2481,10 @@ int falcon_probe_nic(struct efx_nic *efx)
*/ */
int falcon_init_nic(struct efx_nic *efx) int falcon_init_nic(struct efx_nic *efx)
{ {
struct falcon_nic_data *data;
efx_oword_t temp; efx_oword_t temp;
unsigned thresh; unsigned thresh;
int rc; int rc;
data = (struct falcon_nic_data *)efx->nic_data;
/* Set up the address region register. This is only needed /* Set up the address region register. This is only needed
* for the B0 FPGA, but since we are just pushing in the * for the B0 FPGA, but since we are just pushing in the
* reset defaults this may as well be unconditional. */ * reset defaults this may as well be unconditional. */
@ -2562,7 +2551,7 @@ int falcon_init_nic(struct efx_nic *efx)
/* Set number of RSS queues for receive path. */ /* Set number of RSS queues for receive path. */
falcon_read(efx, &temp, RX_FILTER_CTL_REG); falcon_read(efx, &temp, RX_FILTER_CTL_REG);
if (FALCON_REV(efx) >= FALCON_REV_B0) if (falcon_rev(efx) >= FALCON_REV_B0)
EFX_SET_OWORD_FIELD(temp, NUM_KER, 0); EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
else else
EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1); EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
@ -2600,7 +2589,7 @@ int falcon_init_nic(struct efx_nic *efx)
/* Prefetch threshold 2 => fetch when descriptor cache half empty */ /* Prefetch threshold 2 => fetch when descriptor cache half empty */
EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2); EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
/* Squash TX of packets of 16 bytes or less */ /* Squash TX of packets of 16 bytes or less */
if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1); EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
falcon_write(efx, &temp, TX_CFG2_REG_KER); falcon_write(efx, &temp, TX_CFG2_REG_KER);
@ -2617,7 +2606,7 @@ int falcon_init_nic(struct efx_nic *efx)
if (EFX_WORKAROUND_7575(efx)) if (EFX_WORKAROUND_7575(efx))
EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE, EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
(3 * 4096) / 32); (3 * 4096) / 32);
if (FALCON_REV(efx) >= FALCON_REV_B0) if (falcon_rev(efx) >= FALCON_REV_B0)
EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1); EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
/* RX FIFO flow control thresholds */ /* RX FIFO flow control thresholds */
@ -2633,7 +2622,7 @@ int falcon_init_nic(struct efx_nic *efx)
falcon_write(efx, &temp, RX_CFG_REG_KER); falcon_write(efx, &temp, RX_CFG_REG_KER);
/* Set destination of both TX and RX Flush events */ /* Set destination of both TX and RX Flush events */
if (FALCON_REV(efx) >= FALCON_REV_B0) { if (falcon_rev(efx) >= FALCON_REV_B0) {
EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
falcon_write(efx, &temp, DP_CTRL_REG); falcon_write(efx, &temp, DP_CTRL_REG);
} }
@ -2647,7 +2636,7 @@ void falcon_remove_nic(struct efx_nic *efx)
falcon_free_buffer(efx, &efx->irq_status); falcon_free_buffer(efx, &efx->irq_status);
(void) falcon_reset_hw(efx, RESET_TYPE_ALL); falcon_reset_hw(efx, RESET_TYPE_ALL);
/* Release the second function after the reset */ /* Release the second function after the reset */
if (nic_data->pci_dev2) { if (nic_data->pci_dev2) {

View file

@ -23,7 +23,10 @@ enum falcon_revision {
FALCON_REV_B0 = 2, FALCON_REV_B0 = 2,
}; };
#define FALCON_REV(efx) ((efx)->pci_dev->revision) static inline int falcon_rev(struct efx_nic *efx)
{
return efx->pci_dev->revision;
}
extern struct efx_nic_type falcon_a_nic_type; extern struct efx_nic_type falcon_a_nic_type;
extern struct efx_nic_type falcon_b_nic_type; extern struct efx_nic_type falcon_b_nic_type;
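Replacing the FALCON_REV() macro with falcon_rev() should cost nothing at runtime but gains argument type checking. The general shape of the trade, with hypothetical "foo" names:

#define FOO_REV(x)	((x)->pci_dev->revision)	/* accepts any x */

static inline int foo_rev(const struct foo_nic *nic)	/* type-checked */
{
	return nic->pci_dev->revision;
}

Passing the wrong pointer type to the macro still compiles if the field names happen to match; the inline version is rejected outright.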

View file

@ -1125,7 +1125,7 @@ struct falcon_nvconfig_board_v2 {
u8 port1_phy_type; u8 port1_phy_type;
__le16 asic_sub_revision; __le16 asic_sub_revision;
__le16 board_revision; __le16 board_revision;
} __attribute__ ((packed)); } __packed;
#define NVCONFIG_BASE 0x300 #define NVCONFIG_BASE 0x300
#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C #define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
@ -1144,6 +1144,6 @@ struct falcon_nvconfig {
__le16 board_struct_ver; __le16 board_struct_ver;
__le16 board_checksum; __le16 board_checksum;
struct falcon_nvconfig_board_v2 board_v2; struct falcon_nvconfig_board_v2 board_v2;
} __attribute__ ((packed)); } __packed;
#endif /* EFX_FALCON_HWDEFS_H */ #endif /* EFX_FALCON_HWDEFS_H */
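__packed, adopted in the hunks above, is the kernel's shorthand for __attribute__((packed)) and keeps compiler-inserted padding out of structures that mirror an external layout (NVRAM contents here). A small illustrative sketch:

#include <linux/compiler.h>	/* defines __packed */
#include <linux/types.h>

struct nvram_hdr {
	__le16 magic;
	u8     version;
	__le32 body_len;	/* offset 3, not 4, thanks to __packed */
} __packed;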

View file

@ -56,14 +56,27 @@
#define FALCON_USE_QWORD_IO 1 #define FALCON_USE_QWORD_IO 1
#endif #endif
#define _falcon_writeq(efx, value, reg) \ #ifdef FALCON_USE_QWORD_IO
__raw_writeq((__force u64) (value), (efx)->membase + (reg)) static inline void _falcon_writeq(struct efx_nic *efx, __le64 value,
#define _falcon_writel(efx, value, reg) \ unsigned int reg)
__raw_writel((__force u32) (value), (efx)->membase + (reg)) {
#define _falcon_readq(efx, reg) \ __raw_writeq((__force u64)value, efx->membase + reg);
((__force __le64) __raw_readq((efx)->membase + (reg))) }
#define _falcon_readl(efx, reg) \ static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg)
((__force __le32) __raw_readl((efx)->membase + (reg))) {
return (__force __le64)__raw_readq(efx->membase + reg);
}
#endif
static inline void _falcon_writel(struct efx_nic *efx, __le32 value,
unsigned int reg)
{
__raw_writel((__force u32)value, efx->membase + reg);
}
static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg)
{
return (__force __le32)__raw_readl(efx->membase + reg);
}
/* Writes to a normal 16-byte Falcon register, locking as appropriate. */ /* Writes to a normal 16-byte Falcon register, locking as appropriate. */
static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value, static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,

View file

@ -221,7 +221,7 @@ static int falcon_xgmii_status(struct efx_nic *efx)
{ {
efx_dword_t reg; efx_dword_t reg;
if (FALCON_REV(efx) < FALCON_REV_B0) if (falcon_rev(efx) < FALCON_REV_B0)
return 1; return 1;
/* The ISR latches, so clear it and re-read */ /* The ISR latches, so clear it and re-read */
@ -241,7 +241,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, int enable)
{ {
efx_dword_t reg; efx_dword_t reg;
if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
return; return;
/* Flush the ISR */ /* Flush the ISR */
@ -454,7 +454,7 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx)
EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n", EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n",
__func__, tries); __func__, tries);
(void) falcon_reset_xaui(efx); falcon_reset_xaui(efx);
udelay(200); udelay(200);
tries--; tries--;
} }
@ -572,7 +572,7 @@ int falcon_check_xmac(struct efx_nic *efx)
xaui_link_ok = falcon_xaui_link_ok(efx); xaui_link_ok = falcon_xaui_link_ok(efx);
if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok) if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok)
(void) falcon_reset_xaui(efx); falcon_reset_xaui(efx);
/* Call the PHY check_hw routine */ /* Call the PHY check_hw routine */
rc = efx->phy_op->check_hw(efx); rc = efx->phy_op->check_hw(efx);
@ -639,7 +639,7 @@ int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control)
reset = ((flow_control & EFX_FC_TX) && reset = ((flow_control & EFX_FC_TX) &&
!(efx->flow_control & EFX_FC_TX)); !(efx->flow_control & EFX_FC_TX));
if (EFX_WORKAROUND_11482(efx) && reset) { if (EFX_WORKAROUND_11482(efx) && reset) {
if (FALCON_REV(efx) >= FALCON_REV_B0) { if (falcon_rev(efx) >= FALCON_REV_B0) {
/* Recover by resetting the EM block */ /* Recover by resetting the EM block */
if (efx->link_up) if (efx->link_up)
falcon_drain_tx_fifo(efx); falcon_drain_tx_fifo(efx);

View file

@ -42,7 +42,7 @@
#ifndef EFX_DRIVER_NAME #ifndef EFX_DRIVER_NAME
#define EFX_DRIVER_NAME "sfc" #define EFX_DRIVER_NAME "sfc"
#endif #endif
#define EFX_DRIVER_VERSION "2.2.0136" #define EFX_DRIVER_VERSION "2.2"
#ifdef EFX_ENABLE_DEBUG #ifdef EFX_ENABLE_DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x) #define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@ -52,28 +52,19 @@
#define EFX_WARN_ON_PARANOID(x) do {} while (0) #define EFX_WARN_ON_PARANOID(x) do {} while (0)
#endif #endif
#define NET_DEV_REGISTERED(efx) \
((efx)->net_dev->reg_state == NETREG_REGISTERED)
/* Include net device name in log messages if it has been registered.
* Use efx->name not efx->net_dev->name so that races with (un)registration
* are harmless.
*/
#define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "")
/* Un-rate-limited logging */ /* Un-rate-limited logging */
#define EFX_ERR(efx, fmt, args...) \ #define EFX_ERR(efx, fmt, args...) \
dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args) dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args)
#define EFX_INFO(efx, fmt, args...) \ #define EFX_INFO(efx, fmt, args...) \
dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args) dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args)
#ifdef EFX_ENABLE_DEBUG #ifdef EFX_ENABLE_DEBUG
#define EFX_LOG(efx, fmt, args...) \ #define EFX_LOG(efx, fmt, args...) \
dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
#else #else
#define EFX_LOG(efx, fmt, args...) \ #define EFX_LOG(efx, fmt, args...) \
dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
#endif #endif
#define EFX_TRACE(efx, fmt, args...) do {} while (0) #define EFX_TRACE(efx, fmt, args...) do {} while (0)
@ -90,11 +81,6 @@ do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0)
#define EFX_LOG_RL(efx, fmt, args...) \ #define EFX_LOG_RL(efx, fmt, args...) \
do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0) do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
/* Kernel headers may redefine inline anyway */
#ifndef inline
#define inline inline __attribute__ ((always_inline))
#endif
/************************************************************************** /**************************************************************************
* *
* Efx data structures * Efx data structures
@ -695,7 +681,7 @@ struct efx_nic {
struct workqueue_struct *workqueue; struct workqueue_struct *workqueue;
struct work_struct reset_work; struct work_struct reset_work;
struct delayed_work monitor_work; struct delayed_work monitor_work;
unsigned long membase_phys; resource_size_t membase_phys;
void __iomem *membase; void __iomem *membase;
spinlock_t biu_lock; spinlock_t biu_lock;
enum efx_int_mode interrupt_mode; enum efx_int_mode interrupt_mode;
@ -719,7 +705,7 @@ struct efx_nic {
unsigned n_rx_nodesc_drop_cnt; unsigned n_rx_nodesc_drop_cnt;
void *nic_data; struct falcon_nic_data *nic_data;
struct mutex mac_lock; struct mutex mac_lock;
int port_enabled; int port_enabled;
@ -760,6 +746,20 @@ struct efx_nic {
void *loopback_selftest; void *loopback_selftest;
}; };
static inline int efx_dev_registered(struct efx_nic *efx)
{
return efx->net_dev->reg_state == NETREG_REGISTERED;
}
/* Net device name, for inclusion in log messages if it has been registered.
* Use efx->name not efx->net_dev->name so that races with (un)registration
* are harmless.
*/
static inline const char *efx_dev_name(struct efx_nic *efx)
{
return efx_dev_registered(efx) ? efx->name : "";
}
/** /**
* struct efx_nic_type - Efx device type definition * struct efx_nic_type - Efx device type definition
* @mem_bar: Memory BAR number * @mem_bar: Memory BAR number
@ -795,7 +795,7 @@ struct efx_nic_type {
unsigned int txd_ring_mask; unsigned int txd_ring_mask;
unsigned int rxd_ring_mask; unsigned int rxd_ring_mask;
unsigned int evq_size; unsigned int evq_size;
dma_addr_t max_dma_mask; u64 max_dma_mask;
unsigned int tx_dma_mask; unsigned int tx_dma_mask;
unsigned bug5391_mask; unsigned bug5391_mask;

View file

@ -86,14 +86,17 @@ static unsigned int rx_refill_limit = 95;
*/ */
#define EFX_RXD_HEAD_ROOM 2 #define EFX_RXD_HEAD_ROOM 2
/* Macros for zero-order pages (potentially) containing multiple RX buffers */ static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
#define RX_DATA_OFFSET(_data) \ {
(((unsigned long) (_data)) & (PAGE_SIZE-1)) /* Offset is always within one page, so we don't need to consider
#define RX_BUF_OFFSET(_rx_buf) \ * the page order.
RX_DATA_OFFSET((_rx_buf)->data) */
return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
#define RX_PAGE_SIZE(_efx) \ }
(PAGE_SIZE * (1u << (_efx)->rx_buffer_order)) static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
return PAGE_SIZE << efx->rx_buffer_order;
}
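The two inline helpers above replace the RX_DATA_OFFSET/RX_PAGE_SIZE macros, and their page arithmetic is worth spelling out, since one high-order block may be carved into several RX buffers. An equivalent standalone sketch (names illustrative):

#include <asm/page.h>

static unsigned int rx_buf_offset(const void *data)
{
	/* Buffers never straddle a page, so the low bits suffice
	 * even when the allocation order is greater than zero. */
	return (unsigned long)data & (PAGE_SIZE - 1);
}

static unsigned int rx_block_size(unsigned int order)
{
	return PAGE_SIZE << order;	/* e.g. 4 KiB << 1 == 8 KiB */
}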
/************************************************************************** /**************************************************************************
@ -106,7 +109,7 @@ static unsigned int rx_refill_limit = 95;
static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr, static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
void **tcpudp_hdr, u64 *hdr_flags, void *priv) void **tcpudp_hdr, u64 *hdr_flags, void *priv)
{ {
struct efx_channel *channel = (struct efx_channel *)priv; struct efx_channel *channel = priv;
struct iphdr *iph; struct iphdr *iph;
struct tcphdr *th; struct tcphdr *th;
@ -131,12 +134,12 @@ static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
void *priv) void *priv)
{ {
struct efx_channel *channel = (struct efx_channel *)priv; struct efx_channel *channel = priv;
struct ethhdr *eh; struct ethhdr *eh;
struct iphdr *iph; struct iphdr *iph;
/* We support EtherII and VLAN encapsulated IPv4 */ /* We support EtherII and VLAN encapsulated IPv4 */
eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset); eh = page_address(frag->page) + frag->page_offset;
*mac_hdr = eh; *mac_hdr = eh;
  if (eh->h_proto == htons(ETH_P_IP)) {
@@ -269,7 +272,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
  return -ENOMEM;
  dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
- 0, RX_PAGE_SIZE(efx),
+ 0, efx_rx_buf_size(efx),
  PCI_DMA_FROMDEVICE);
  if (unlikely(pci_dma_mapping_error(dma_addr))) {
@@ -280,14 +283,14 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
  rx_queue->buf_page = rx_buf->page;
  rx_queue->buf_dma_addr = dma_addr;
- rx_queue->buf_data = ((char *) page_address(rx_buf->page) +
+ rx_queue->buf_data = (page_address(rx_buf->page) +
  EFX_PAGE_IP_ALIGN);
  }
- offset = RX_DATA_OFFSET(rx_queue->buf_data);
  rx_buf->len = bytes;
- rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
  rx_buf->data = rx_queue->buf_data;
+ offset = efx_rx_buf_offset(rx_buf);
+ rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
  /* Try to pack multiple buffers per page */
  if (efx->rx_buffer_order == 0) {
@@ -295,7 +298,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
  rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
  offset += ((bytes + 0x1ff) & ~0x1ff);
- space = RX_PAGE_SIZE(efx) - offset;
+ space = efx_rx_buf_size(efx) - offset;
  if (space >= bytes) {
  /* Refs dropped on kernel releasing each skb */
  get_page(rx_queue->buf_page);
@@ -344,7 +347,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
  EFX_BUG_ON_PARANOID(rx_buf->skb);
  if (rx_buf->unmap_addr) {
  pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
- RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE);
+ efx_rx_buf_size(efx),
+ PCI_DMA_FROMDEVICE);
  rx_buf->unmap_addr = 0;
  }
  } else if (likely(rx_buf->skb)) {
@@ -400,9 +404,10 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
  return 0;
  /* Record minimum fill level */
- if (unlikely(fill_level < rx_queue->min_fill))
+ if (unlikely(fill_level < rx_queue->min_fill)) {
  if (fill_level)
  rx_queue->min_fill = fill_level;
+ }
  /* Acquire RX add lock. If this lock is contended, then a fast
  * fill must already be in progress (e.g. in the refill
@@ -552,7 +557,7 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel,
  struct skb_frag_struct frags;
  frags.page = rx_buf->page;
- frags.page_offset = RX_BUF_OFFSET(rx_buf);
+ frags.page_offset = efx_rx_buf_offset(rx_buf);
  frags.size = rx_buf->len;
  lro_receive_frags(lro_mgr, &frags, rx_buf->len,
@@ -597,7 +602,7 @@ static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
  if (unlikely(rx_buf->len > hdr_len)) {
  struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
  frag->page = rx_buf->page;
- frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len;
+ frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len;
  frag->size = skb->len - hdr_len;
  skb_shinfo(skb)->nr_frags = 1;
  skb->data_len = frag->size;
@@ -851,7 +856,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
  /* For a page that is part-way through splitting into RX buffers */
  if (rx_queue->buf_page != NULL) {
  pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
- RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE);
+ efx_rx_buf_size(rx_queue->efx),
+ PCI_DMA_FROMDEVICE);
  __free_pages(rx_queue->buf_page,
  rx_queue->efx->rx_buffer_order);
  rx_queue->buf_page = NULL;

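A stand-alone sketch of the idea behind the efx_rx_buf_offset() conversion above: once several RX buffers are packed into one page-aligned DMA mapping, the buffer's offset within the mapping can be recomputed from its data pointer instead of being carried in a separate variable. Names and the identity mapping below are illustrative, not the driver's real structures.

    /* Minimal model: offset of packet data within its page-aligned
     * DMA mapping, derived from the CPU pointer alone. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096u

    struct rx_buf {
        char *data;   /* CPU address of the packet data */
    };

    /* what efx_rx_buf_offset() computes, in spirit */
    static unsigned long rx_buf_offset(const struct rx_buf *buf)
    {
        return (uintptr_t)buf->data & (PAGE_SIZE - 1);
    }

    int main(void)
    {
        static char mem[2 * PAGE_SIZE];          /* stand-in for a mapped page */
        char *base = (char *)(((uintptr_t)mem + PAGE_SIZE - 1) &
                              ~(uintptr_t)(PAGE_SIZE - 1));
        struct rx_buf a = { .data = base + 2 };      /* IP-aligned buffer */
        struct rx_buf b = { .data = base + 0x200 };  /* second buffer in page */

        printf("a at offset %#lx, b at offset %#lx\n",
               rx_buf_offset(&a), rx_buf_offset(&b));
        return 0;
    }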

@@ -290,7 +290,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
  payload = &state->payload;
- received = (struct efx_loopback_payload *)(char *) buf_ptr;
+ received = (struct efx_loopback_payload *) buf_ptr;
  received->ip.saddr = payload->ip.saddr;
  received->ip.check = payload->ip.check;
@@ -424,10 +424,10 @@ static int efx_tx_loopback(struct efx_tx_queue *tx_queue)
  * interrupt handler. */
  smp_wmb();
- if (NET_DEV_REGISTERED(efx))
+ if (efx_dev_registered(efx))
  netif_tx_lock_bh(efx->net_dev);
  rc = efx_xmit(efx, tx_queue, skb);
- if (NET_DEV_REGISTERED(efx))
+ if (efx_dev_registered(efx))
  netif_tx_unlock_bh(efx->net_dev);
  if (rc != NETDEV_TX_OK) {
@@ -453,7 +453,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
  int tx_done = 0, rx_good, rx_bad;
  int i, rc = 0;
- if (NET_DEV_REGISTERED(efx))
+ if (efx_dev_registered(efx))
  netif_tx_lock_bh(efx->net_dev);
  /* Count the number of tx completions, and decrement the refcnt. Any
@@ -465,7 +465,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
  dev_kfree_skb_any(skb);
  }
- if (NET_DEV_REGISTERED(efx))
+ if (efx_dev_registered(efx))
  netif_tx_unlock_bh(efx->net_dev);
  /* Check TX completion and received packet counts */
@@ -517,6 +517,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
  state->packet_count = min(1 << (i << 2), state->packet_count);
  state->skbs = kzalloc(sizeof(state->skbs[0]) *
  state->packet_count, GFP_KERNEL);
+ if (!state->skbs)
+ return -ENOMEM;
  state->flush = 0;
  EFX_LOG(efx, "TX queue %d testing %s loopback with %d "
@@ -700,7 +702,7 @@ int efx_offline_test(struct efx_nic *efx,
  * "flushing" so all inflight packets are dropped */
  BUG_ON(efx->loopback_selftest);
  state->flush = 1;
- efx->loopback_selftest = (void *)state;
+ efx->loopback_selftest = state;
  rc = efx_test_loopbacks(efx, tests, loopback_modes);


@@ -116,18 +116,18 @@ void sfe4001_poweroff(struct efx_nic *efx)
  /* Turn off all power rails */
  out = 0xff;
- (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
+ efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
  /* Disable port 1 outputs on IO expander */
  cfg = 0xff;
- (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
+ efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
  /* Disable port 0 outputs on IO expander */
  cfg = 0xff;
- (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
+ efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
  /* Clear any over-temperature alert */
- (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
+ efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
  }
  /* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected
@@ -253,14 +253,14 @@ done:
  fail3:
  /* Turn off all power rails */
  out = 0xff;
- (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
+ efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
  /* Disable port 1 outputs on IO expander */
  out = 0xff;
- (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1);
+ efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1);
  fail2:
  /* Disable port 0 outputs on IO expander */
  out = 0xff;
- (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1);
+ efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1);
  fail1:
  return rc;
  }


@@ -211,6 +211,8 @@ static int tenxpress_phy_init(struct efx_nic *efx)
  int rc = 0;
  phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
  efx->phy_data = phy_data;
  tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL);
@@ -376,7 +378,7 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
  * perform a special software reset */
  if ((phy_data->tx_disabled && !efx->tx_disabled) ||
  loop_change) {
- (void) tenxpress_special_reset(efx);
+ tenxpress_special_reset(efx);
  falcon_reset_xaui(efx);
  }


@@ -387,7 +387,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
  if (unlikely(tx_queue->stopped)) {
  fill_level = tx_queue->insert_count - tx_queue->read_count;
  if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
- EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx));
+ EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
  /* Do this under netif_tx_lock(), to avoid racing
  * with efx_xmit(). */
@@ -639,11 +639,12 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
  base_dma = tsoh->dma_addr & PAGE_MASK;
  p = &tx_queue->tso_headers_free;
- while (*p != NULL)
+ while (*p != NULL) {
  if (((unsigned long)*p & PAGE_MASK) == base_kva)
  *p = (*p)->next;
  else
  p = &(*p)->next;
+ }
  pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
  }
@@ -939,9 +940,10 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
  /* Allocate a DMA-mapped header buffer. */
  if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) {
- if (tx_queue->tso_headers_free == NULL)
+ if (tx_queue->tso_headers_free == NULL) {
  if (efx_tsoh_block_alloc(tx_queue))
  return -1;
+ }
  EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
  tsoh = tx_queue->tso_headers_free;
  tx_queue->tso_headers_free = tsoh->next;
@@ -1106,9 +1108,10 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
  {
  unsigned i;
- if (tx_queue->buffer)
+ if (tx_queue->buffer) {
  for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
  efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
+ }
  while (tx_queue->tso_headers_free != NULL)
  efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,


@@ -16,7 +16,7 @@
  */
  #define EFX_WORKAROUND_ALWAYS(efx) 1
- #define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1)
+ #define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1)
  /* XAUI resets if link not detected */
  #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS


@@ -85,7 +85,9 @@ static int xfp_phy_init(struct efx_nic *efx)
  int rc;
  phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL);
- efx->phy_data = (void *) phy_data;
+ if (!phy_data)
+ return -ENOMEM;
+ efx->phy_data = phy_data;
  EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision"
  " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),


@@ -1159,17 +1159,9 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  }
  #ifdef SKY2_VLAN_TAG_USED
- static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+ static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff)
  {
- struct sky2_port *sky2 = netdev_priv(dev);
- struct sky2_hw *hw = sky2->hw;
- u16 port = sky2->port;
- netif_tx_lock_bh(dev);
- napi_disable(&hw->napi);
- sky2->vlgrp = grp;
- if (grp) {
+ if (onoff) {
  sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
  RX_VLAN_STRIP_ON);
  sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
@@ -1180,6 +1172,19 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp
  sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
  TX_VLAN_TAG_OFF);
  }
+ }
+ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+ {
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ u16 port = sky2->port;
+ netif_tx_lock_bh(dev);
+ napi_disable(&hw->napi);
+ sky2->vlgrp = grp;
+ sky2_set_vlan_mode(hw, port, grp != NULL);
  sky2_read32(hw, B0_Y2_SP_LISR);
  napi_enable(&hw->napi);
@@ -1418,6 +1423,10 @@ static int sky2_up(struct net_device *dev)
  sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
  TX_RING_SIZE - 1);
+ #ifdef SKY2_VLAN_TAG_USED
+ sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
+ #endif
  err = sky2_rx_start(sky2);
  if (err)
  goto err_out;

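The sky2 hunks above pull the RX/TX VLAN register writes out into sky2_set_vlan_mode() so that sky2_up() can reapply the configured mode after the hardware is reinitialised. A minimal userspace model of that refactor, with all names as stand-ins:

    /* Model: one helper applies the VLAN state, and both the
     * configuration path and the device-up path call it. */
    #include <stdbool.h>
    #include <stdio.h>

    struct hw { bool vlan_stripping; };
    struct port { struct hw *hw; void *vlgrp; };

    static void set_vlan_mode(struct hw *hw, bool onoff)
    {
        hw->vlan_stripping = onoff;   /* stands in for the register writes */
    }

    static void vlan_rx_register(struct port *p, void *grp)
    {
        p->vlgrp = grp;
        set_vlan_mode(p->hw, grp != NULL);
    }

    static void port_up(struct port *p)
    {
        /* re-apply whatever was configured before the restart */
        set_vlan_mode(p->hw, p->vlgrp != NULL);
    }

    int main(void)
    {
        struct hw hw = { false };
        struct port p = { &hw, NULL };
        vlan_rx_register(&p, (void *)1);
        port_up(&p);                  /* stripping survives the re-init */
        printf("stripping=%d\n", hw.vlan_stripping);
        return 0;
    }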

@@ -264,7 +264,7 @@ struct xl_private {
  u16 asb;
  u8 __iomem *xl_mmio;
- char *xl_card_name;
+ const char *xl_card_name;
  struct pci_dev *pdev ;
  spinlock_t xl_lock ;


@@ -254,7 +254,7 @@ struct olympic_private {
  u8 __iomem *olympic_mmio;
  u8 __iomem *olympic_lap;
  struct pci_dev *pdev ;
- char *olympic_card_name ;
+ const char *olympic_card_name;
  spinlock_t olympic_lock ;


@@ -225,6 +225,9 @@ static void uli526x_set_filter_mode(struct net_device *);
  static const struct ethtool_ops netdev_ethtool_ops;
  static u16 read_srom_word(long, int);
  static irqreturn_t uli526x_interrupt(int, void *);
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ static void uli526x_poll(struct net_device *dev);
+ #endif
  static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long);
  static void allocate_rx_buffer(struct uli526x_board_info *);
  static void update_cr6(u32, unsigned long);
@@ -339,6 +342,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
  dev->get_stats = &uli526x_get_stats;
  dev->set_multicast_list = &uli526x_set_filter_mode;
  dev->ethtool_ops = &netdev_ethtool_ops;
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = &uli526x_poll;
+ #endif
  spin_lock_init(&db->lock);
@@ -681,8 +687,9 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
  db->cr5_data = inl(ioaddr + DCR5);
  outl(db->cr5_data, ioaddr + DCR5);
  if ( !(db->cr5_data & 0x180c1) ) {
- spin_unlock_irqrestore(&db->lock, flags);
+ /* Restore CR7 to enable interrupt mask */
  outl(db->cr7_data, ioaddr + DCR7);
+ spin_unlock_irqrestore(&db->lock, flags);
  return IRQ_HANDLED;
  }
@@ -715,6 +722,13 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
  return IRQ_HANDLED;
  }
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ static void uli526x_poll(struct net_device *dev)
+ {
+ /* ISR grabs the irqsave lock, so this should be safe */
+ uli526x_interrupt(dev->irq, dev);
+ }
+ #endif
  /*
  * Free TX resource after TX complete


@@ -237,7 +237,7 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
  skb->dev = ugeth->dev;
  out_be32(&((struct qe_bd __iomem *)bd)->buf,
- dma_map_single(NULL,
+ dma_map_single(&ugeth->dev->dev,
  skb->data,
  ugeth->ug_info->uf_info.max_rx_buf_length +
  UCC_GETH_RX_DATA_BUF_ALIGNMENT,
@@ -2158,7 +2158,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
  continue;
  for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
  if (ugeth->tx_skbuff[i][j]) {
- dma_unmap_single(NULL,
+ dma_unmap_single(&ugeth->dev->dev,
  in_be32(&((struct qe_bd __iomem *)bd)->buf),
  (in_be32((u32 __iomem *)bd) &
  BD_LENGTH_MASK),
@@ -2186,7 +2186,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
  bd = ugeth->p_rx_bd_ring[i];
  for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
  if (ugeth->rx_skbuff[i][j]) {
- dma_unmap_single(NULL,
+ dma_unmap_single(&ugeth->dev->dev,
  in_be32(&((struct qe_bd __iomem *)bd)->buf),
  ugeth->ug_info->
  uf_info.max_rx_buf_length +
@@ -3406,7 +3406,8 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
  /* set up the buffer descriptor */
  out_be32(&((struct qe_bd __iomem *)bd)->buf,
- dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));
+ dma_map_single(&ugeth->dev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE));
  /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */

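The ucc_geth hunks replace the NULL device argument to dma_map_single()/dma_unmap_single() with the net device's real struct device. A hedged userspace model of why the mapping layer wants a non-NULL device — the real DMA API consults per-device masks and ops, which is only sketched here:

    /* Illustrative only: a mapper that needs real per-device state. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct device { uint64_t dma_mask; };

    static uint64_t map_single(struct device *dev, void *cpu_addr, size_t len)
    {
        assert(dev != NULL);                 /* NULL has no mask/ops to consult */
        (void)len;
        uint64_t bus = (uintptr_t)cpu_addr;  /* identity mapping stand-in */
        return bus & dev->dma_mask;          /* device-specific constraint */
    }

    int main(void)
    {
        struct device eth = { .dma_mask = 0xffffffffu };  /* 32-bit capable */
        char buf[64];
        printf("bus addr %#llx\n",
               (unsigned long long)map_single(&eth, buf, sizeof(buf)));
        return 0;
    }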

@@ -1440,6 +1440,10 @@ static const struct usb_device_id products [] = {
  // Belkin F5D5055
  USB_DEVICE(0x050d, 0x5055),
  .driver_info = (unsigned long) &ax88178_info,
+ }, {
+ // Apple USB Ethernet Adapter
+ USB_DEVICE(0x05ac, 0x1402),
+ .driver_info = (unsigned long) &ax88772_info,
  },
  { }, // END
  };


@@ -194,7 +194,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf)
  dev_dbg(&info->control->dev,
  "rndis response error, code %d\n", retval);
  }
- msleep(2);
+ msleep(20);
  }
  dev_dbg(&info->control->dev, "rndis response timeout\n");
  return -ETIMEDOUT;


@@ -470,8 +470,7 @@ static void virtnet_remove(struct virtio_device *vdev)
  kfree_skb(skb);
  vi->num--;
  }
- while ((skb = __skb_dequeue(&vi->send)) != NULL)
- kfree_skb(skb);
+ __skb_queue_purge(&vi->send);
  BUG_ON(vi->num != 0);

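__skb_queue_purge() used above is the stock helper for exactly the loop being deleted: pop every queued skb and free it. A minimal model of that equivalence on a plain singly linked queue:

    /* The replaced loop and the purge helper do the same thing. */
    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next; };
    struct queue { struct node *head; };

    static struct node *dequeue(struct queue *q)
    {
        struct node *n = q->head;
        if (n)
            q->head = n->next;
        return n;
    }

    static void queue_purge(struct queue *q)   /* the helper's shape */
    {
        struct node *n;
        while ((n = dequeue(q)) != NULL)
            free(n);                           /* kfree_skb() stand-in */
    }

    int main(void)
    {
        struct queue q = { NULL };
        for (int i = 0; i < 3; i++) {          /* build a short queue */
            struct node *n = malloc(sizeof(*n));
            n->next = q.head;
            q.head = n;
        }
        queue_purge(&q);
        printf("head=%p\n", (void *)q.head);
        return 0;
    }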

@@ -43,8 +43,7 @@ static const char* version = "HDLC support module revision 1.22";
  #undef DEBUG_LINK
- static struct hdlc_proto *first_proto = NULL;
+ static struct hdlc_proto *first_proto;
  static int hdlc_change_mtu(struct net_device *dev, int new_mtu)
  {
@@ -314,21 +313,25 @@ void detach_hdlc_protocol(struct net_device *dev)
  void register_hdlc_protocol(struct hdlc_proto *proto)
  {
+ rtnl_lock();
  proto->next = first_proto;
  first_proto = proto;
+ rtnl_unlock();
  }
  void unregister_hdlc_protocol(struct hdlc_proto *proto)
  {
- struct hdlc_proto **p = &first_proto;
- while (*p) {
- if (*p == proto) {
- *p = proto->next;
- return;
- }
+ struct hdlc_proto **p;
+ rtnl_lock();
+ p = &first_proto;
+ while (*p != proto) {
+ BUG_ON(!*p);
  p = &((*p)->next);
  }
+ *p = proto->next;
+ rtnl_unlock();
  }

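The rewritten unregister_hdlc_protocol() above serialises list access under rtnl_lock() and walks the list with a pointer-to-pointer, which removes the head-versus-middle special case. A stand-alone sketch of that unlink idiom, with assert() standing in for BUG_ON():

    /* Pointer-to-pointer unlink: no "previous node" bookkeeping. */
    #include <assert.h>
    #include <stdio.h>

    struct proto { int id; struct proto *next; };

    static struct proto *head;

    static void unregister(struct proto *proto)
    {
        struct proto **p = &head;
        while (*p != proto) {
            assert(*p != NULL);     /* models the BUG_ON(!*p) */
            p = &(*p)->next;
        }
        *p = proto->next;           /* unlink, even if proto is the head */
    }

    int main(void)
    {
        struct proto a = { 1, NULL }, b = { 2, &a };
        head = &b;
        unregister(&b);             /* removing the head just works */
        printf("head id=%d\n", head->id);
        return 0;
    }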

@@ -56,6 +56,7 @@ struct cisco_state {
  cisco_proto settings;
  struct timer_list timer;
+ spinlock_t lock;
  unsigned long last_poll;
  int up;
  int request_sent;
@@ -158,6 +159,7 @@ static int cisco_rx(struct sk_buff *skb)
  {
  struct net_device *dev = skb->dev;
  hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct cisco_state *st = state(hdlc);
  struct hdlc_header *data = (struct hdlc_header*)skb->data;
  struct cisco_packet *cisco_data;
  struct in_device *in_dev;
@@ -220,11 +222,12 @@ static int cisco_rx(struct sk_buff *skb)
  goto rx_error;
  case CISCO_KEEPALIVE_REQ:
- state(hdlc)->rxseq = ntohl(cisco_data->par1);
- if (state(hdlc)->request_sent &&
- ntohl(cisco_data->par2) == state(hdlc)->txseq) {
- state(hdlc)->last_poll = jiffies;
- if (!state(hdlc)->up) {
+ spin_lock(&st->lock);
+ st->rxseq = ntohl(cisco_data->par1);
+ if (st->request_sent &&
+ ntohl(cisco_data->par2) == st->txseq) {
+ st->last_poll = jiffies;
+ if (!st->up) {
  u32 sec, min, hrs, days;
  sec = ntohl(cisco_data->time) / 1000;
  min = sec / 60; sec -= min * 60;
@@ -232,12 +235,12 @@ static int cisco_rx(struct sk_buff *skb)
  days = hrs / 24; hrs -= days * 24;
  printk(KERN_INFO "%s: Link up (peer "
  "uptime %ud%uh%um%us)\n",
- dev->name, days, hrs,
- min, sec);
+ dev->name, days, hrs, min, sec);
  netif_dormant_off(dev);
- state(hdlc)->up = 1;
+ st->up = 1;
  }
  }
+ spin_unlock(&st->lock);
  dev_kfree_skb_any(skb);
  return NET_RX_SUCCESS;
@@ -261,24 +264,25 @@ static void cisco_timer(unsigned long arg)
  {
  struct net_device *dev = (struct net_device *)arg;
  hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct cisco_state *st = state(hdlc);
- if (state(hdlc)->up &&
- time_after(jiffies, state(hdlc)->last_poll +
- state(hdlc)->settings.timeout * HZ)) {
- state(hdlc)->up = 0;
+ spin_lock(&st->lock);
+ if (st->up &&
+ time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) {
+ st->up = 0;
  printk(KERN_INFO "%s: Link down\n", dev->name);
  netif_dormant_on(dev);
  }
- cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ,
- htonl(++state(hdlc)->txseq),
- htonl(state(hdlc)->rxseq));
- state(hdlc)->request_sent = 1;
- state(hdlc)->timer.expires = jiffies +
- state(hdlc)->settings.interval * HZ;
- state(hdlc)->timer.function = cisco_timer;
- state(hdlc)->timer.data = arg;
- add_timer(&state(hdlc)->timer);
+ cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq),
+ htonl(st->rxseq));
+ st->request_sent = 1;
+ spin_unlock(&st->lock);
+ st->timer.expires = jiffies + st->settings.interval * HZ;
+ st->timer.function = cisco_timer;
+ st->timer.data = arg;
+ add_timer(&st->timer);
  }
@@ -286,15 +290,20 @@ static void cisco_timer(unsigned long arg)
  static void cisco_start(struct net_device *dev)
  {
  hdlc_device *hdlc = dev_to_hdlc(dev);
- state(hdlc)->up = 0;
- state(hdlc)->request_sent = 0;
- state(hdlc)->txseq = state(hdlc)->rxseq = 0;
- init_timer(&state(hdlc)->timer);
- state(hdlc)->timer.expires = jiffies + HZ; /*First poll after 1s*/
- state(hdlc)->timer.function = cisco_timer;
- state(hdlc)->timer.data = (unsigned long)dev;
- add_timer(&state(hdlc)->timer);
+ struct cisco_state *st = state(hdlc);
+ unsigned long flags;
+ spin_lock_irqsave(&st->lock, flags);
+ st->up = 0;
+ st->request_sent = 0;
+ st->txseq = st->rxseq = 0;
+ spin_unlock_irqrestore(&st->lock, flags);
+ init_timer(&st->timer);
+ st->timer.expires = jiffies + HZ; /* First poll after 1 s */
+ st->timer.function = cisco_timer;
+ st->timer.data = (unsigned long)dev;
+ add_timer(&st->timer);
  }
@@ -302,10 +311,16 @@ static void cisco_start(struct net_device *dev)
  static void cisco_stop(struct net_device *dev)
  {
  hdlc_device *hdlc = dev_to_hdlc(dev);
- del_timer_sync(&state(hdlc)->timer);
+ struct cisco_state *st = state(hdlc);
+ unsigned long flags;
+ del_timer_sync(&st->timer);
+ spin_lock_irqsave(&st->lock, flags);
  netif_dormant_on(dev);
- state(hdlc)->up = 0;
- state(hdlc)->request_sent = 0;
+ st->up = 0;
+ st->request_sent = 0;
+ spin_unlock_irqrestore(&st->lock, flags);
  }
@@ -367,6 +382,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
  return result;
  memcpy(&state(hdlc)->settings, &new_settings, size);
+ spin_lock_init(&state(hdlc)->lock);
  dev->hard_start_xmit = hdlc->xmit;
  dev->header_ops = &cisco_header_ops;
  dev->type = ARPHRD_CISCO;

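The new spinlock in struct cisco_state exists because the keepalive timer and the receive path both read-modify-write txseq/rxseq/up. A pthread model of the locking pattern, with a mutex standing in for the spinlock and all names illustrative:

    /* Two contexts touching shared keepalive state under one lock. */
    #include <pthread.h>
    #include <stdio.h>

    struct cisco_state {
        pthread_mutex_t lock;
        unsigned txseq, rxseq;
        int up;
    };

    static void timer_tick(struct cisco_state *st)   /* cisco_timer() shape */
    {
        pthread_mutex_lock(&st->lock);
        st->txseq++;                                 /* send keepalive */
        pthread_mutex_unlock(&st->lock);
    }

    static void rx_keepalive(struct cisco_state *st, unsigned par1, unsigned par2)
    {
        pthread_mutex_lock(&st->lock);
        st->rxseq = par1;
        if (par2 == st->txseq)                       /* peer echoed our seq */
            st->up = 1;
        pthread_mutex_unlock(&st->lock);
    }

    int main(void)
    {
        struct cisco_state st = { PTHREAD_MUTEX_INITIALIZER, 0, 0, 0 };
        timer_tick(&st);
        rx_keepalive(&st, 7, 1);
        printf("up=%d\n", st.up);
        return 0;
    }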

@@ -2669,6 +2669,7 @@ static struct net_device *init_wifidev(struct airo_info *ai,
  dev->irq = ethdev->irq;
  dev->base_addr = ethdev->base_addr;
  dev->wireless_data = ethdev->wireless_data;
+ SET_NETDEV_DEV(dev, ethdev->dev.parent);
  memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len);
  err = register_netdev(dev);
  if (err<0) {
@@ -2905,7 +2906,7 @@ EXPORT_SYMBOL(init_airo_card);
  static int waitbusy (struct airo_info *ai) {
  int delay = 0;
- while ((IN4500 (ai, COMMAND) & COMMAND_BUSY) & (delay < 10000)) {
+ while ((IN4500 (ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) {
  udelay (10);
  if ((++delay % 20) == 0)
  OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);


@@ -833,6 +833,7 @@ static struct pcmcia_device_id hostap_cs_ids[] = {
  PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x0001),
  PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300),
  /* PCMCIA_DEVICE_MANF_CARD(0xc00f, 0x0000), conflict with pcnet_cs */
+ PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002),
  PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002),
  PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005),
  PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010),


@@ -3276,11 +3276,6 @@ while (0)
  }
  printk(KERN_INFO "%s: Registered netdevice %s\n", dev_info, dev->name);
- #ifndef PRISM2_NO_PROCFS_DEBUG
- create_proc_read_entry("registers", 0, local->proc,
- prism2_registers_proc_read, local);
- #endif /* PRISM2_NO_PROCFS_DEBUG */
  hostap_init_data(local);
  return dev;
@@ -3307,6 +3302,10 @@ static int hostap_hw_ready(struct net_device *dev)
  netif_carrier_off(local->ddev);
  }
  hostap_init_proc(local);
+ #ifndef PRISM2_NO_PROCFS_DEBUG
+ create_proc_read_entry("registers", 0, local->proc,
+ prism2_registers_proc_read, local);
+ #endif /* PRISM2_NO_PROCFS_DEBUG */
  hostap_init_ap_proc(local);
  return 0;
  }


@@ -11584,6 +11584,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
  priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit;
  priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
+ SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
  rc = register_netdev(priv->prom_net_dev);
  if (rc) {


@@ -73,8 +73,8 @@ out:
  return ret;
  }
- static void lbs_ethtool_get_stats(struct net_device * dev,
- struct ethtool_stats * stats, u64 * data)
+ static void lbs_ethtool_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, uint64_t *data)
  {
  struct lbs_private *priv = dev->priv;
  struct cmd_ds_mesh_access mesh_access;
@@ -83,12 +83,12 @@ static void lbs_ethtool_get_stats(struct net_device * dev,
  lbs_deb_enter(LBS_DEB_ETHTOOL);
  /* Get Mesh Statistics */
- ret = lbs_prepare_and_send_command(priv,
- CMD_MESH_ACCESS, CMD_ACT_MESH_GET_STATS,
- CMD_OPTION_WAITFORRSP, 0, &mesh_access);
- if (ret)
+ ret = lbs_mesh_access(priv, CMD_ACT_MESH_GET_STATS, &mesh_access);
+ if (ret) {
+ memset(data, 0, MESH_STATS_NUM*(sizeof(uint64_t)));
  return;
+ }
  priv->mstats.fwd_drop_rbt = le32_to_cpu(mesh_access.data[0]);
  priv->mstats.fwd_drop_ttl = le32_to_cpu(mesh_access.data[1]);
@@ -111,19 +111,18 @@ static void lbs_ethtool_get_stats(struct net_device * dev,
  lbs_deb_enter(LBS_DEB_ETHTOOL);
  }
- static int lbs_ethtool_get_sset_count(struct net_device * dev, int sset)
+ static int lbs_ethtool_get_sset_count(struct net_device *dev, int sset)
  {
- switch (sset) {
- case ETH_SS_STATS:
+ struct lbs_private *priv = dev->priv;
+ if (sset == ETH_SS_STATS && dev == priv->mesh_dev)
  return MESH_STATS_NUM;
- default:
  return -EOPNOTSUPP;
- }
  }
  static void lbs_ethtool_get_strings(struct net_device *dev,
- u32 stringset,
- u8 * s)
+ uint32_t stringset, uint8_t *s)
  {
  int i;


@@ -756,6 +756,7 @@ static int lbs_thread(void *data)
  priv->nr_retries = 0;
  } else {
  priv->cur_cmd = NULL;
+ priv->dnld_sent = DNLD_RES_RECEIVED;
  lbs_pr_info("requeueing command %x due to timeout (#%d)\n",
  le16_to_cpu(cmdnode->cmdbuf->command), priv->nr_retries);
@@ -1556,6 +1557,7 @@ static int lbs_add_rtap(struct lbs_private *priv)
  rtap_dev->hard_start_xmit = lbs_rtap_hard_start_xmit;
  rtap_dev->set_multicast_list = lbs_set_multicast_list;
  rtap_dev->priv = priv;
+ SET_NETDEV_DEV(rtap_dev, priv->dev->dev.parent);
  ret = register_netdev(rtap_dev);
  if (ret) {


@@ -461,6 +461,7 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
  PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */
  PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */
  PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */
+ PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x3021), /* SpeedStream Wireless Adapter */
  PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */
  PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */
  PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */


@@ -92,6 +92,7 @@ static void rtl8187_iowrite_async(struct rtl8187_priv *priv, __le16 addr,
  u8 data[4];
  struct usb_ctrlrequest dr;
  } *buf;
+ int rc;
  buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
  if (!buf)
@@ -116,7 +117,11 @@ static void rtl8187_iowrite_async(struct rtl8187_priv *priv, __le16 addr,
  usb_fill_control_urb(urb, priv->udev, usb_sndctrlpipe(priv->udev, 0),
  (unsigned char *)dr, buf, len,
  rtl8187_iowrite_async_cb, buf);
- usb_submit_urb(urb, GFP_ATOMIC);
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (rc < 0) {
+ kfree(buf);
+ usb_free_urb(urb);
+ }
  }
  static inline void rtl818x_iowrite32_async(struct rtl8187_priv *priv,
@@ -169,6 +174,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
  struct urb *urb;
  __le16 rts_dur = 0;
  u32 flags;
+ int rc;
  urb = usb_alloc_urb(0, GFP_ATOMIC);
  if (!urb) {
@@ -208,7 +214,11 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
  info->dev = dev;
  usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, 2),
  hdr, skb->len, rtl8187_tx_cb, skb);
- usb_submit_urb(urb, GFP_ATOMIC);
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (rc < 0) {
+ usb_free_urb(urb);
+ kfree_skb(skb);
+ }
  return 0;
  }

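The pattern both rtl8187 hunks add: when usb_submit_urb() fails, the completion callback never runs, so the submitter itself must release what the callback would have released. A generic userspace model of that error path:

    /* Failed async submit: the submitter owns the cleanup. */
    #include <stdio.h>
    #include <stdlib.h>

    static int submit(void *req)        /* usb_submit_urb() stand-in */
    {
        (void)req;
        return -1;                      /* pretend the submit failed */
    }

    static void send_async(void)
    {
        void *urb = malloc(32);
        void *buf = malloc(32);
        if (!urb || !buf) {
            free(urb);
            free(buf);
            return;
        }
        if (submit(urb) < 0) {          /* no callback will fire... */
            free(buf);                  /* ...so undo both allocations here */
            free(urb);
        }
    }

    int main(void) { send_async(); puts("done"); return 0; }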

@@ -946,8 +946,7 @@ err:
  work_done++;
  }
- while ((skb = __skb_dequeue(&errq)))
- kfree_skb(skb);
+ __skb_queue_purge(&errq);
  work_done -= handle_incoming_queue(dev, &rxq);
@@ -1079,8 +1078,7 @@ static void xennet_release_rx_bufs(struct netfront_info *np)
  }
  }
- while ((skb = __skb_dequeue(&free_list)) != NULL)
- dev_kfree_skb(skb);
+ __skb_queue_purge(&free_list);
  spin_unlock_bh(&np->rx_lock);
  }


@@ -92,7 +92,6 @@ header-y += if_slip.h
  header-y += if_strip.h
  header-y += if_tun.h
  header-y += if_tunnel.h
- header-y += in6.h
  header-y += in_route.h
  header-y += ioctl.h
  header-y += ip6_tunnel.h
@@ -236,6 +235,7 @@ unifdef-y += if_vlan.h
  unifdef-y += igmp.h
  unifdef-y += inet_diag.h
  unifdef-y += in.h
+ unifdef-y += in6.h
  unifdef-y += inotify.h
  unifdef-y += input.h
  unifdef-y += ip.h


@@ -745,6 +745,9 @@ struct net_device
  /* rtnetlink link ops */
  const struct rtnl_link_ops *rtnl_link_ops;
+ /* VLAN feature mask */
+ unsigned long vlan_features;
  /* for setting kernel sock attribute on TCP connection setup */
  #define GSO_MAX_SIZE 65536
  unsigned int gso_max_size;


@@ -3,7 +3,6 @@
  #ifdef __KERNEL__
  #include <linux/init.h>
- #include <linux/types.h>
  #include <linux/skbuff.h>
  #include <linux/net.h>
  #include <linux/netdevice.h>
@@ -14,6 +13,7 @@
  #include <linux/list.h>
  #include <net/net_namespace.h>
  #endif
+ #include <linux/types.h>
  #include <linux/compiler.h>
  /* Responses from hook functions. */


@@ -11,11 +11,11 @@
  #ifdef __KERNEL__
  #include <linux/if.h>
- #include <linux/types.h>
  #include <linux/in.h>
  #include <linux/if_arp.h>
  #include <linux/skbuff.h>
  #endif
+ #include <linux/types.h>
  #include <linux/compiler.h>
  #include <linux/netfilter_arp.h>


@@ -17,11 +17,11 @@
  #ifdef __KERNEL__
  #include <linux/if.h>
- #include <linux/types.h>
  #include <linux/in.h>
  #include <linux/ip.h>
  #include <linux/skbuff.h>
  #endif
+ #include <linux/types.h>
  #include <linux/compiler.h>
  #include <linux/netfilter_ipv4.h>


@@ -17,11 +17,11 @@
  #ifdef __KERNEL__
  #include <linux/if.h>
- #include <linux/types.h>
  #include <linux/in6.h>
  #include <linux/ipv6.h>
  #include <linux/skbuff.h>
  #endif
+ #include <linux/types.h>
  #include <linux/compiler.h>
  #include <linux/netfilter_ipv6.h>


@@ -355,7 +355,7 @@ struct tcp_sock {
  u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */
  u16 advmss; /* Advertised MSS */
- u16 prior_ssthresh; /* ssthresh saved at recovery start */
+ u32 prior_ssthresh; /* ssthresh saved at recovery start */
  u32 lost_out; /* Lost packets */
  u32 sacked_out; /* SACK'd packets */
  u32 fackets_out; /* FACK'd packets */


@@ -1602,13 +1602,16 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw);
  void ieee80211_scan_completed(struct ieee80211_hw *hw);
  /**
- * ieee80211_iterate_active_interfaces - iterate active interfaces
+ * ieee80211_iterate_active_interfaces- iterate active interfaces
  *
  * This function iterates over the interfaces associated with a given
  * hardware that are currently active and calls the callback for them.
+ * This function allows the iterator function to sleep, when the iterator
+ * function is atomic @ieee80211_iterate_active_interfaces_atomic can
+ * be used.
  *
  * @hw: the hardware struct of which the interfaces should be iterated over
- * @iterator: the iterator function to call, cannot sleep
+ * @iterator: the iterator function to call
  * @data: first argument of the iterator function
  */
  void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw,
@@ -1616,6 +1619,24 @@ void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw,
  struct ieee80211_vif *vif),
  void *data);
+ /**
+ * ieee80211_iterate_active_interfaces_atomic - iterate active interfaces
+ *
+ * This function iterates over the interfaces associated with a given
+ * hardware that are currently active and calls the callback for them.
+ * This function requires the iterator callback function to be atomic,
+ * if that is not desired, use @ieee80211_iterate_active_interfaces instead.
+ *
+ * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iterator: the iterator function to call, cannot sleep
+ * @data: first argument of the iterator function
+ */
+ void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
+ void (*iterator)(void *data,
+ u8 *mac,
+ struct ieee80211_vif *vif),
+ void *data);
  /**
  * ieee80211_start_tx_ba_session - Start a tx Block Ack session.
  * @hw: pointer as obtained from ieee80211_alloc_hw().


@@ -129,6 +129,10 @@ extern int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl,
  void __user *buffer,
  size_t *lenp,
  loff_t *ppos);
+ int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl, int __user *name,
+ int nlen, void __user *oldval,
+ size_t __user *oldlenp,
+ void __user *newval, size_t newlen);
  #endif
  extern void inet6_ifinfo_notify(int event,


@@ -772,12 +772,13 @@ static inline int __nla_parse_nested_compat(struct nlattr *tb[], int maxtype,
  const struct nla_policy *policy,
  int len)
  {
- if (nla_len(nla) < len)
+ int nested_len = nla_len(nla) - NLA_ALIGN(len);
+ if (nested_len < 0)
  return -1;
- if (nla_len(nla) >= NLA_ALIGN(len) + sizeof(struct nlattr))
- return nla_parse_nested(tb, maxtype,
- nla_data(nla) + NLA_ALIGN(len),
- policy);
+ if (nested_len >= nla_attr_size(0))
+ return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
+ nested_len, policy);
  memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
  return 0;
  }

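The fixed __nla_parse_nested_compat() computes the nested stream's length as what remains after the aligned fixed header, then validates that remainder rather than the raw payload length. The same arithmetic in isolation, where ALIGN4 and ATTR_HDR are stand-ins for NLA_ALIGN() and nla_attr_size(0):

    /* Bounds logic of the fix, outside the kernel. */
    #include <stdio.h>

    #define ALIGNTO 4
    #define ALIGN4(n) (((n) + ALIGNTO - 1) & ~(ALIGNTO - 1))
    #define ATTR_HDR  4   /* stand-in for nla_attr_size(0) */

    static int nested_ok(int payload_len, int fixed_len)
    {
        int nested_len = payload_len - ALIGN4(fixed_len);
        if (nested_len < 0)
            return -1;                  /* truncated: reject */
        if (nested_len >= ATTR_HDR)
            return 1;                   /* enough room to parse attributes */
        return 0;                       /* no nested attrs: treat as empty */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               nested_ok(3, 6),         /* -1: shorter than the fixed part */
               nested_ok(8, 6),         /*  0: aligned fixed part only */
               nested_ok(16, 6));       /*  1: nested attributes present */
        return 0;
    }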

@@ -382,6 +382,18 @@ static void vlan_sync_address(struct net_device *dev,
  memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
  }
+ static void vlan_transfer_features(struct net_device *dev,
+ struct net_device *vlandev)
+ {
+ unsigned long old_features = vlandev->features;
+ vlandev->features &= ~dev->vlan_features;
+ vlandev->features |= dev->features & dev->vlan_features;
+ if (old_features != vlandev->features)
+ netdev_features_change(vlandev);
+ }
  static void __vlan_device_event(struct net_device *dev, unsigned long event)
  {
  switch (event) {
@@ -410,10 +422,8 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
  int i, flgs;
  struct net_device *vlandev;
- if (is_vlan_dev(dev)) {
+ if (is_vlan_dev(dev))
  __vlan_device_event(dev, event);
- goto out;
- }
  grp = __vlan_find_group(dev);
  if (!grp)
@@ -450,6 +460,18 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
  }
  break;
+ case NETDEV_FEAT_CHANGE:
+ /* Propagate device features to underlying device */
+ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+ vlandev = vlan_group_get_device(grp, i);
+ if (!vlandev)
+ continue;
+ vlan_transfer_features(dev, vlandev);
+ }
+ break;
  case NETDEV_DOWN:
  /* Put all VLANs for this dev in the down state too. */
  for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {


@@ -663,6 +663,8 @@ static int vlan_dev_init(struct net_device *dev)
  (1<<__LINK_STATE_DORMANT))) |
  (1<<__LINK_STATE_PRESENT);
+ dev->features |= real_dev->features & real_dev->vlan_features;
  /* ipv6 shared card related stuff */
  dev->dev_id = real_dev->dev_id;

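Taken together with the netdevice.h and vlan.c hunks, the rule is: the real device advertises in vlan_features which feature bits survive VLAN tagging, and each VLAN device recomputes its own bits from that mask at init time and again on NETDEV_FEAT_CHANGE. A bitmask model of vlan_transfer_features(), with made-up feature flags:

    /* Feature propagation from real device to vlan device. */
    #include <stdio.h>

    #define F_SG   0x1u
    #define F_CSUM 0x2u
    #define F_TSO  0x4u

    struct dev { unsigned features, vlan_features; };

    static void transfer_features(const struct dev *real, struct dev *vlan)
    {
        vlan->features &= ~real->vlan_features;           /* drop stale bits */
        vlan->features |= real->features & real->vlan_features;
    }

    int main(void)
    {
        struct dev real = { .features = F_SG | F_CSUM | F_TSO,
                            .vlan_features = F_SG | F_CSUM };
        struct dev vlan = { 0, 0 };

        transfer_features(&real, &vlan);   /* vlan gets SG+CSUM, not TSO */
        real.features &= ~F_CSUM;          /* e.g. ethtool turned csum off */
        transfer_features(&real, &vlan);   /* re-run on FEAT_CHANGE */
        printf("vlan features %#x\n", vlan.features);
        return 0;
    }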

@@ -3141,7 +3141,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
  * Load in the correct multicast list now the flags have changed.
  */
- if (dev->change_rx_flags && (dev->flags ^ flags) & IFF_MULTICAST)
+ if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
  dev->change_rx_flags(dev, IFF_MULTICAST);
  dev_set_rx_mode(dev);


@@ -390,6 +390,7 @@ struct pktgen_thread {
  int cpu;
  wait_queue_head_t queue;
+ struct completion start_done;
  };
  #define REMOVE 1
@@ -3414,6 +3415,7 @@ static int pktgen_thread_worker(void *arg)
  BUG_ON(smp_processor_id() != cpu);
  init_waitqueue_head(&t->queue);
+ complete(&t->start_done);
  pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current));
@@ -3615,6 +3617,7 @@ static int __init pktgen_create_thread(int cpu)
  INIT_LIST_HEAD(&t->if_list);
  list_add_tail(&t->th_list, &pktgen_threads);
+ init_completion(&t->start_done);
  p = kthread_create(pktgen_thread_worker, t, "kpktgend_%d", cpu);
  if (IS_ERR(p)) {
@@ -3639,6 +3642,7 @@ static int __init pktgen_create_thread(int cpu)
  }
  wake_up_process(p);
+ wait_for_completion(&t->start_done);
  return 0;
  }

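The completion added above closes a startup race: pktgen_create_thread() must not return before the worker has initialised its wait queue. A pthread model of the init_completion()/complete()/wait_for_completion() handshake, with illustrative names:

    /* Creator blocks until the worker signals it is initialised. */
    #include <pthread.h>
    #include <stdio.h>

    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int done;
    };

    static struct completion start_done = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
    };

    static void *worker(void *arg)
    {
        (void)arg;
        /* ... set up per-thread state (the wait queue in pktgen) ... */
        pthread_mutex_lock(&start_done.lock);
        start_done.done = 1;                      /* complete() */
        pthread_cond_signal(&start_done.cond);
        pthread_mutex_unlock(&start_done.lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, worker, NULL);
        pthread_mutex_lock(&start_done.lock);     /* wait_for_completion() */
        while (!start_done.done)
            pthread_cond_wait(&start_done.cond, &start_done.lock);
        pthread_mutex_unlock(&start_done.lock);
        puts("worker initialised; safe to use it");
        pthread_join(t, NULL);
        return 0;
    }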

@@ -1288,7 +1288,6 @@ static void arp_format_neigh_entry(struct seq_file *seq,
  struct neighbour *n)
  {
  char hbuffer[HBUFFERLEN];
- const char hexbuf[] = "0123456789ABCDEF";
  int k, j;
  char tbuf[16];
  struct net_device *dev = n->dev;
@@ -1302,8 +1301,8 @@ static void arp_format_neigh_entry(struct seq_file *seq,
  else {
  #endif
  for (k = 0, j = 0; k < HBUFFERLEN - 3 && j < dev->addr_len; j++) {
- hbuffer[k++] = hexbuf[(n->ha[j] >> 4) & 15];
- hbuffer[k++] = hexbuf[n->ha[j] & 15];
+ hbuffer[k++] = hex_asc_hi(n->ha[j]);
+ hbuffer[k++] = hex_asc_lo(n->ha[j]);
  hbuffer[k++] = ':';
  }
  hbuffer[--k] = 0;

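hex_asc_hi()/hex_asc_lo() index the kernel's shared lowercase hex digit table, so this conversion also switches the ARP proc output from the old uppercase hexbuf to lowercase. A stand-alone equivalent of the helpers:

    /* What the helpers expand to, outside the kernel. */
    #include <stdio.h>

    static const char hex_asc[] = "0123456789abcdef";
    #define hex_asc_lo(x) hex_asc[(x) & 0x0f]
    #define hex_asc_hi(x) hex_asc[((x) >> 4) & 0x0f]

    int main(void)
    {
        unsigned char mac[] = { 0x00, 0x1a, 0x4b, 0xff };
        for (unsigned i = 0; i < sizeof(mac); i++)
            printf("%c%c%c", hex_asc_hi(mac[i]), hex_asc_lo(mac[i]),
                   i + 1 < sizeof(mac) ? ':' : '\n');
        return 0;
    }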

@@ -313,9 +313,8 @@ static void ipgre_tunnel_uninit(struct net_device *dev)
  static void ipgre_err(struct sk_buff *skb, u32 info)
  {
- #ifndef I_WISH_WORLD_WERE_PERFECT
- /* It is not :-( All the routers (except for Linux) return only
+ /* All the routers (except for Linux) return only
  8 bytes of packet payload. It means, that precise relaying of
  ICMP in the real Internet is absolutely infeasible.
@@ -398,149 +397,6 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
  out:
  read_unlock(&ipgre_lock);
  return;
- #else
- struct iphdr *iph = (struct iphdr*)dp;
- struct iphdr *eiph;
- __be16 *p = (__be16*)(dp+(iph->ihl<<2));
- const int type = icmp_hdr(skb)->type;
- const int code = icmp_hdr(skb)->code;
- int rel_type = 0;
- int rel_code = 0;
- __be32 rel_info = 0;
- __u32 n = 0;
- __be16 flags;
- int grehlen = (iph->ihl<<2) + 4;
- struct sk_buff *skb2;
- struct flowi fl;
- struct rtable *rt;
- if (p[1] != htons(ETH_P_IP))
- return;
- flags = p[0];
- if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
- if (flags&(GRE_VERSION|GRE_ROUTING))
- return;
- if (flags&GRE_CSUM)
- grehlen += 4;
- if (flags&GRE_KEY)
- grehlen += 4;
- if (flags&GRE_SEQ)
- grehlen += 4;
- }
- if (len < grehlen + sizeof(struct iphdr))
- return;
- eiph = (struct iphdr*)(dp + grehlen);
- switch (type) {
- default:
- return;
- case ICMP_PARAMETERPROB:
- n = ntohl(icmp_hdr(skb)->un.gateway) >> 24;
- if (n < (iph->ihl<<2))
- return;
- /* So... This guy found something strange INSIDE encapsulated
- packet. Well, he is fool, but what can we do ?
- */
- rel_type = ICMP_PARAMETERPROB;
- n -= grehlen;
- rel_info = htonl(n << 24);
- break;
- case ICMP_DEST_UNREACH:
- switch (code) {
- case ICMP_SR_FAILED:
- case ICMP_PORT_UNREACH:
- /* Impossible event. */
- return;
- case ICMP_FRAG_NEEDED:
- /* And it is the only really necessary thing :-) */
- n = ntohs(icmp_hdr(skb)->un.frag.mtu);
- if (n < grehlen+68)
- return;
- n -= grehlen;
- /* BSD 4.2 MORE DOES NOT EXIST IN NATURE. */
- if (n > ntohs(eiph->tot_len))
- return;
- rel_info = htonl(n);
- break;
- default:
- /* All others are translated to HOST_UNREACH.
- rfc2003 contains "deep thoughts" about NET_UNREACH,
- I believe, it is just ether pollution. --ANK
- */
- rel_type = ICMP_DEST_UNREACH;
- rel_code = ICMP_HOST_UNREACH;
- break;
- }
- break;
- case ICMP_TIME_EXCEEDED:
- if (code != ICMP_EXC_TTL)
- return;
- break;
- }
- /* Prepare fake skb to feed it to icmp_send */
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (skb2 == NULL)
- return;
- dst_release(skb2->dst);
- skb2->dst = NULL;
- skb_pull(skb2, skb->data - (u8*)eiph);
- skb_reset_network_header(skb2);
- /* Try to guess incoming interface */
- memset(&fl, 0, sizeof(fl));
- fl.fl4_dst = eiph->saddr;
- fl.fl4_tos = RT_TOS(eiph->tos);
- fl.proto = IPPROTO_GRE;
- if (ip_route_output_key(dev_net(skb->dev), &rt, &fl)) {
- kfree_skb(skb2);
- return;
- }
- skb2->dev = rt->u.dst.dev;
- /* route "incoming" packet */
- if (rt->rt_flags&RTCF_LOCAL) {
- ip_rt_put(rt);
- rt = NULL;
- fl.fl4_dst = eiph->daddr;
- fl.fl4_src = eiph->saddr;
- fl.fl4_tos = eiph->tos;
- if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) ||
- rt->u.dst.dev->type != ARPHRD_IPGRE) {
- ip_rt_put(rt);
- kfree_skb(skb2);
- return;
- }
- } else {
- ip_rt_put(rt);
- if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) ||
- skb2->dst->dev->type != ARPHRD_IPGRE) {
- kfree_skb(skb2);
- return;
- }
- }
- /* change mtu on this route */
- if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
- if (n > dst_mtu(skb2->dst)) {
- kfree_skb(skb2);
- return;
- }
- skb2->dst->ops->update_pmtu(skb2->dst, n);
- } else if (type == ICMP_TIME_EXCEEDED) {
- struct ip_tunnel *t = netdev_priv(skb2->dev);
- if (t->parms.iph.ttl) {
- rel_type = ICMP_DEST_UNREACH;
- rel_code = ICMP_HOST_UNREACH;
- }
- }
- icmp_send(skb2, rel_type, rel_code, rel_info);
- kfree_skb(skb2);
- #endif
  }
  static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)


@@ -278,9 +278,8 @@ static void ipip_tunnel_uninit(struct net_device *dev)
  static int ipip_err(struct sk_buff *skb, u32 info)
  {
- #ifndef I_WISH_WORLD_WERE_PERFECT
- /* It is not :-( All the routers (except for Linux) return only
+ /* All the routers (except for Linux) return only
  8 bytes of packet payload. It means, that precise relaying of
  ICMP in the real Internet is absolutely infeasible.
  */
@@ -337,133 +336,6 @@ static int ipip_err(struct sk_buff *skb, u32 info)
  out:
  read_unlock(&ipip_lock);
  return err;
- #else
- struct iphdr *iph = (struct iphdr*)dp;
- int hlen = iph->ihl<<2;
- struct iphdr *eiph;
- const int type = icmp_hdr(skb)->type;
- const int code = icmp_hdr(skb)->code;
- int rel_type = 0;
- int rel_code = 0;
- __be32 rel_info = 0;
- __u32 n = 0;
- struct sk_buff *skb2;
- struct flowi fl;
- struct rtable *rt;
- if (len < hlen + sizeof(struct iphdr))
- return 0;
- eiph = (struct iphdr*)(dp + hlen);
- switch (type) {
- default:
- return 0;
- case ICMP_PARAMETERPROB:
- n = ntohl(icmp_hdr(skb)->un.gateway) >> 24;
- if (n < hlen)
- return 0;
- /* So... This guy found something strange INSIDE encapsulated
- packet. Well, he is fool, but what can we do ?
- */
- rel_type = ICMP_PARAMETERPROB;
- rel_info = htonl((n - hlen) << 24);
- break;
- case ICMP_DEST_UNREACH:
- switch (code) {
- case ICMP_SR_FAILED:
- case ICMP_PORT_UNREACH:
- /* Impossible event. */
- return 0;
- case ICMP_FRAG_NEEDED:
- /* And it is the only really necessary thing :-) */
- n = ntohs(icmp_hdr(skb)->un.frag.mtu);
- if (n < hlen+68)
- return 0;
- n -= hlen;
- /* BSD 4.2 MORE DOES NOT EXIST IN NATURE. */
- if (n > ntohs(eiph->tot_len))
- return 0;
- rel_info = htonl(n);
- break;
- default:
- /* All others are translated to HOST_UNREACH.
- rfc2003 contains "deep thoughts" about NET_UNREACH,
- I believe, it is just ether pollution. --ANK
- */
- rel_type = ICMP_DEST_UNREACH;
- rel_code = ICMP_HOST_UNREACH;
- break;
- }
- break;
- case ICMP_TIME_EXCEEDED:
- if (code != ICMP_EXC_TTL)
- return 0;
- break;
- }
- /* Prepare fake skb to feed it to icmp_send */
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (skb2 == NULL)
- return 0;
- dst_release(skb2->dst);
- skb2->dst = NULL;
- skb_pull(skb2, skb->data - (u8*)eiph);
- skb_reset_network_header(skb2);
- /* Try to guess incoming interface */
- memset(&fl, 0, sizeof(fl));
- fl.fl4_daddr = eiph->saddr;
- fl.fl4_tos = RT_TOS(eiph->tos);
- fl.proto = IPPROTO_IPIP;
- if (ip_route_output_key(dev_net(skb->dev), &rt, &key)) {
- kfree_skb(skb2);
- return 0;
- }
- skb2->dev = rt->u.dst.dev;
- /* route "incoming" packet */
- if (rt->rt_flags&RTCF_LOCAL) {
- ip_rt_put(rt);
- rt = NULL;
- fl.fl4_daddr = eiph->daddr;
- fl.fl4_src = eiph->saddr;
- fl.fl4_tos = eiph->tos;
- if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) ||
- rt->u.dst.dev->type != ARPHRD_TUNNEL) {
- ip_rt_put(rt);
- kfree_skb(skb2);
- return 0;
- }
- } else {
- ip_rt_put(rt);
- if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) ||
- skb2->dst->dev->type != ARPHRD_TUNNEL) {
- kfree_skb(skb2);
- return 0;
- }
- }
- /* change mtu on this route */
- if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
- if (n > dst_mtu(skb2->dst)) {
- kfree_skb(skb2);
- return 0;
- }
- skb2->dst->ops->update_pmtu(skb2->dst, n);
- } else if (type == ICMP_TIME_EXCEEDED) {
- struct ip_tunnel *t = netdev_priv(skb2->dev);
- if (t->parms.iph.ttl) {
- rel_type = ICMP_DEST_UNREACH;
- rel_code = ICMP_HOST_UNREACH;
- }
- }
- icmp_send(skb2, rel_type, rel_code, rel_info);
- kfree_skb(skb2);
- return 0;
- #endif
  }
  static inline void ipip_ecn_decapsulate(const struct iphdr *outer_iph,


@@ -160,7 +160,7 @@ static struct dst_ops ipv4_dst_ops = {
  .negative_advice = ipv4_negative_advice,
  .link_failure = ipv4_link_failure,
  .update_pmtu = ip_rt_update_pmtu,
- .local_out = ip_local_out,
+ .local_out = __ip_local_out,
  .entry_size = sizeof(struct rtable),
  .entries = ATOMIC_INIT(0),
  };


@@ -1836,7 +1836,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
  {
  struct tcp_sock *tp = tcp_sk(sk);
  struct inet_connection_sock *icsk = inet_csk(sk);
- unsigned int cur_mss = tcp_current_mss(sk, 0);
+ unsigned int cur_mss;
  int err;
  /* Inconslusive MTU probe */
@@ -1858,6 +1858,11 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
  return -ENOMEM;
  }
+ if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
+ return -EHOSTUNREACH; /* Routing failure or similar. */
+ cur_mss = tcp_current_mss(sk, 0);
  /* If receiver has shrunk his window, and skb is out of
  * new window, do not retransmit it. The exception is the
  * case, when window is shrunk to zero. In this case
@@ -1884,9 +1889,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
  (sysctl_tcp_retrans_collapse != 0))
  tcp_retrans_try_collapse(sk, skb, cur_mss);
- if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
- return -EHOSTUNREACH; /* Routing failure or similar. */
  /* Some Solaris stacks overoptimize and ignore the FIN on a
  * retransmit when old data is attached. So strip it off
  * since it is cheap to do so and saves bytes on the network.


@ -1764,14 +1764,16 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
* 2) Configure prefixes with the auto flag set * 2) Configure prefixes with the auto flag set
*/ */
/* Avoid arithmetic overflow. Really, we could if (valid_lft == INFINITY_LIFE_TIME)
save rt_expires in seconds, likely valid_lft, rt_expires = ~0UL;
but it would require division in fib gc, that it else if (valid_lft >= 0x7FFFFFFF/HZ) {
not good. /* Avoid arithmetic overflow. Really, we could
*/ * save rt_expires in seconds, likely valid_lft,
if (valid_lft >= 0x7FFFFFFF/HZ) * but it would require division in fib gc, that it
* not good.
*/
rt_expires = 0x7FFFFFFF - (0x7FFFFFFF % HZ); rt_expires = 0x7FFFFFFF - (0x7FFFFFFF % HZ);
else } else
rt_expires = valid_lft * HZ; rt_expires = valid_lft * HZ;
/* /*
@ -1779,7 +1781,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
* Avoid arithmetic overflow there as well. * Avoid arithmetic overflow there as well.
* Overflow can happen only if HZ < USER_HZ. * Overflow can happen only if HZ < USER_HZ.
*/ */
if (HZ < USER_HZ && rt_expires > 0x7FFFFFFF / USER_HZ) if (HZ < USER_HZ && ~rt_expires && rt_expires > 0x7FFFFFFF / USER_HZ)
rt_expires = 0x7FFFFFFF / USER_HZ; rt_expires = 0x7FFFFFFF / USER_HZ;
if (pinfo->onlink) { if (pinfo->onlink) {
@ -1788,17 +1790,28 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
dev->ifindex, 1); dev->ifindex, 1);
if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) { if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) {
if (rt->rt6i_flags&RTF_EXPIRES) { /* Autoconf prefix route */
if (valid_lft == 0) { if (valid_lft == 0) {
ip6_del_rt(rt); ip6_del_rt(rt);
rt = NULL; rt = NULL;
} else { } else if (~rt_expires) {
rt->rt6i_expires = jiffies + rt_expires; /* not infinity */
} rt->rt6i_expires = jiffies + rt_expires;
rt->rt6i_flags |= RTF_EXPIRES;
} else {
rt->rt6i_flags &= ~RTF_EXPIRES;
rt->rt6i_expires = 0;
} }
} else if (valid_lft) { } else if (valid_lft) {
int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
clock_t expires = 0;
if (~rt_expires) {
/* not infinity */
flags |= RTF_EXPIRES;
expires = jiffies_to_clock_t(rt_expires);
}
addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len, addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
dev, jiffies_to_clock_t(rt_expires), RTF_ADDRCONF|RTF_EXPIRES|RTF_PREFIX_RT); dev, expires, flags);
} }
if (rt) if (rt)
dst_release(&rt->u.dst); dst_release(&rt->u.dst);
@@ -2021,7 +2034,8 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
 	struct inet6_dev *idev;
 	struct net_device *dev;
 	int scope;
-	u32 flags = RTF_EXPIRES;
+	u32 flags;
+	clock_t expires;

 	ASSERT_RTNL();
@@ -2041,8 +2055,13 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
 	if (valid_lft == INFINITY_LIFE_TIME) {
 		ifa_flags |= IFA_F_PERMANENT;
 		flags = 0;
-	} else if (valid_lft >= 0x7FFFFFFF/HZ)
-		valid_lft = 0x7FFFFFFF/HZ;
+		expires = 0;
+	} else {
+		if (valid_lft >= 0x7FFFFFFF/HZ)
+			valid_lft = 0x7FFFFFFF/HZ;
+		flags = RTF_EXPIRES;
+		expires = jiffies_to_clock_t(valid_lft * HZ);
+	}

 	if (prefered_lft == 0)
 		ifa_flags |= IFA_F_DEPRECATED;
@@ -2060,7 +2079,7 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
 		spin_unlock_bh(&ifp->lock);

 		addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev,
-				      jiffies_to_clock_t(valid_lft * HZ), flags);
+				      expires, flags);

 		/*
 		 * Note that section 3.1 of RFC 4429 indicates
 		 * that the Optimistic flag should not be set for
@@ -3148,7 +3167,8 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
 			     u32 prefered_lft, u32 valid_lft)
 {
-	u32 flags = RTF_EXPIRES;
+	u32 flags;
+	clock_t expires;

 	if (!valid_lft || (prefered_lft > valid_lft))
 		return -EINVAL;
@@ -3156,8 +3176,13 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
 	if (valid_lft == INFINITY_LIFE_TIME) {
 		ifa_flags |= IFA_F_PERMANENT;
 		flags = 0;
-	} else if (valid_lft >= 0x7FFFFFFF/HZ)
-		valid_lft = 0x7FFFFFFF/HZ;
+		expires = 0;
+	} else {
+		if (valid_lft >= 0x7FFFFFFF/HZ)
+			valid_lft = 0x7FFFFFFF/HZ;
+		flags = RTF_EXPIRES;
+		expires = jiffies_to_clock_t(valid_lft * HZ);
+	}

 	if (prefered_lft == 0)
 		ifa_flags |= IFA_F_DEPRECATED;
@@ -3176,7 +3201,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
 	ipv6_ifa_notify(0, ifp);

 	addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev,
-			      jiffies_to_clock_t(valid_lft * HZ), flags);
+			      expires, flags);

 	addrconf_verify(0);
 	return 0;
@@ -4242,7 +4267,7 @@ static void addrconf_sysctl_register(struct inet6_dev *idev)
 	neigh_sysctl_register(idev->dev, idev->nd_parms, NET_IPV6,
 			      NET_IPV6_NEIGH, "ipv6",
 			      &ndisc_ifinfo_sysctl_change,
-			      NULL);
+			      ndisc_ifinfo_sysctl_strategy);
 	__addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
 				   idev->dev->ifindex, idev, &idev->cnf);
 }

View file

@@ -1727,10 +1727,10 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, struct file * f
 	return ret;
 }

-static int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl, int __user *name,
+int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl, int __user *name,
 					int nlen, void __user *oldval,
 					size_t __user *oldlenp,
 					void __user *newval, size_t newlen)
 {
 	struct net_device *dev = ctl->extra1;
 	struct inet6_dev *idev;

View file

@@ -109,7 +109,7 @@ static struct dst_ops ip6_dst_ops_template = {
 	.negative_advice	=	ip6_negative_advice,
 	.link_failure		=	ip6_link_failure,
 	.update_pmtu		=	ip6_rt_update_pmtu,
-	.local_out		=	ip6_local_out,
+	.local_out		=	__ip6_local_out,
 	.entry_size		=	sizeof(struct rt6_info),
 	.entries		=	ATOMIC_INIT(0),
 };
@@ -475,7 +475,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
 	lifetime = ntohl(rinfo->lifetime);
 	if (lifetime == 0xffffffff) {
 		/* infinity */
-	} else if (lifetime > 0x7fffffff/HZ) {
+	} else if (lifetime > 0x7fffffff/HZ - 1) {
 		/* Avoid arithmetic overflow */
 		lifetime = 0x7fffffff/HZ - 1;
 	}
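The guard now matches the clamp target: previously a lifetime of exactly 0x7fffffff/HZ slipped through unclamped even though everything above it was reduced to 0x7fffffff/HZ - 1. A quick check of the boundary, with HZ = 100 chosen only for illustration:

    #include <stdio.h>

    #define HZ 100 /* illustrative; the real value comes from the kernel config */

    int main(void)
    {
            unsigned long limit = 0x7fffffff / HZ; /* 21474836 with HZ = 100 */
            unsigned long lifetime = limit;        /* the boundary case */

            /* Old guard: (lifetime > limit) is false, so the boundary value
             * was kept while larger values were clamped *below* it. The new
             * guard treats the boundary like everything above it: */
            if (lifetime > limit - 1)
                    lifetime = limit - 1;

            printf("boundary %lu clamped to %lu; scaled: %lu\n",
                   limit, lifetime, lifetime * HZ); /* stays below 0x7fffffff */
            return 0;
    }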
@@ -1106,7 +1106,9 @@ int ip6_route_add(struct fib6_config *cfg)
 	}

 	rt->u.dst.obsolete = -1;
-	rt->rt6i_expires = jiffies + clock_t_to_jiffies(cfg->fc_expires);
+	rt->rt6i_expires = (cfg->fc_flags & RTF_EXPIRES) ?
+				jiffies + clock_t_to_jiffies(cfg->fc_expires) :
+				0;

 	if (cfg->fc_protocol == RTPROT_UNSPEC)
 		cfg->fc_protocol = RTPROT_BOOT;
@@ -2200,7 +2202,9 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
 	NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);

-	expires = rt->rt6i_expires ? rt->rt6i_expires - jiffies : 0;
+	expires = (rt->rt6i_flags & RTF_EXPIRES) ?
+		  rt->rt6i_expires - jiffies : 0;
+
 	if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0,
 			       expires, rt->u.dst.error) < 0)
 		goto nla_put_failure;
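Together with the ip6_route_add() change above, this makes RTF_EXPIRES, not a zero/non-zero rt6i_expires, the authoritative "this route is finite" signal, so a permanent route can carry expires == 0 without being reported as expiring right now. A minimal sketch of the convention (the struct is a stand-in for rt6_info; RTF_EXPIRES has its kernel value):

    #include <stdio.h>

    #define RTF_EXPIRES 0x00400000 /* value as in the kernel headers */

    struct rt_sketch {
            unsigned int flags;
            unsigned long expires; /* absolute jiffies; valid only with RTF_EXPIRES */
    };

    /* Read the remaining lifetime the way rt6_fill_node() now does: the
     * flag, not the expires field itself, decides whether it is finite. */
    static long remaining(const struct rt_sketch *rt, unsigned long now)
    {
            return (rt->flags & RTF_EXPIRES) ? (long)(rt->expires - now) : 0;
    }

    int main(void)
    {
            /* A permanent route may legitimately carry expires == 0 ... */
            struct rt_sketch permanent = { .flags = 0, .expires = 0 };
            /* ... while a finite one pairs the flag with a real deadline. */
            struct rt_sketch finite = { .flags = RTF_EXPIRES, .expires = 5000 };

            printf("%ld %ld\n", remaining(&permanent, 1000),
                   remaining(&finite, 1000));
            return 0;
    }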

View file

@@ -403,9 +403,8 @@ static void ipip6_tunnel_uninit(struct net_device *dev)

 static int ipip6_err(struct sk_buff *skb, u32 info)
 {
-#ifndef I_WISH_WORLD_WERE_PERFECT

-/* It is not :-( All the routers (except for Linux) return only
+/* All the routers (except for Linux) return only
    8 bytes of packet payload. It means, that precise relaying of
    ICMP in the real Internet is absolutely infeasible.
  */
@@ -462,92 +461,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
 out:
 	read_unlock(&ipip6_lock);
 	return err;
-#else
-	struct iphdr *iph = (struct iphdr*)dp;
-	int hlen = iph->ihl<<2;
-	struct ipv6hdr *iph6;
-	const int type = icmp_hdr(skb)->type;
-	const int code = icmp_hdr(skb)->code;
-	int rel_type = 0;
-	int rel_code = 0;
-	int rel_info = 0;
-	struct sk_buff *skb2;
-	struct rt6_info *rt6i;
-
-	if (len < hlen + sizeof(struct ipv6hdr))
-		return;
-	iph6 = (struct ipv6hdr*)(dp + hlen);
-
-	switch (type) {
-	default:
-		return;
-	case ICMP_PARAMETERPROB:
-		if (icmp_hdr(skb)->un.gateway < hlen)
-			return;
-
-		/* So... This guy found something strange INSIDE encapsulated
-		   packet. Well, he is fool, but what can we do ?
-		 */
-		rel_type = ICMPV6_PARAMPROB;
-		rel_info = icmp_hdr(skb)->un.gateway - hlen;
-		break;
-
-	case ICMP_DEST_UNREACH:
-		switch (code) {
-		case ICMP_SR_FAILED:
-		case ICMP_PORT_UNREACH:
-			/* Impossible event. */
-			return;
-		case ICMP_FRAG_NEEDED:
-			/* Too complicated case ... */
-			return;
-		default:
-			/* All others are translated to HOST_UNREACH.
-			   rfc2003 contains "deep thoughts" about NET_UNREACH,
-			   I believe, it is just ether pollution. --ANK
-			 */
-			rel_type = ICMPV6_DEST_UNREACH;
-			rel_code = ICMPV6_ADDR_UNREACH;
-			break;
-		}
-		break;
-	case ICMP_TIME_EXCEEDED:
-		if (code != ICMP_EXC_TTL)
-			return;
-		rel_type = ICMPV6_TIME_EXCEED;
-		rel_code = ICMPV6_EXC_HOPLIMIT;
-		break;
-	}
-
-	/* Prepare fake skb to feed it to icmpv6_send */
-	skb2 = skb_clone(skb, GFP_ATOMIC);
-	if (skb2 == NULL)
-		return 0;
-	dst_release(skb2->dst);
-	skb2->dst = NULL;
-	skb_pull(skb2, skb->data - (u8*)iph6);
-	skb_reset_network_header(skb2);
-
-	/* Try to guess incoming interface */
-	rt6i = rt6_lookup(dev_net(skb->dev), &iph6->saddr, NULL, NULL, 0);
-	if (rt6i && rt6i->rt6i_dev) {
-		skb2->dev = rt6i->rt6i_dev;
-
-		rt6i = rt6_lookup(dev_net(skb->dev),
-				  &iph6->daddr, &iph6->saddr, NULL, 0);
-
-		if (rt6i && rt6i->rt6i_dev && rt6i->rt6i_dev->type == ARPHRD_SIT) {
-			struct ip_tunnel *t = netdev_priv(rt6i->rt6i_dev);
-			if (rel_type == ICMPV6_TIME_EXCEED && t->parms.iph.ttl) {
-				rel_type = ICMPV6_DEST_UNREACH;
-				rel_code = ICMPV6_ADDR_UNREACH;
-			}
-			icmpv6_send(skb2, rel_type, rel_code, rel_info, skb2->dev);
-		}
-	}
-
-	kfree_skb(skb2);
-	return 0;
-#endif
 }

 static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)

View file

@@ -1251,7 +1251,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr,
 		x->sel.prefixlen_s = addr->sadb_address_prefixlen;
 	}

-	if (x->props.mode == XFRM_MODE_TRANSPORT)
+	if (!x->sel.family)
 		x->sel.family = x->props.family;

 	if (ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]) {
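The rewritten test is plain fill-in-if-unset: the selector family is now defaulted from the SA's own family whenever userspace left it at AF_UNSPEC (0), no longer only in transport mode. A toy illustration (the struct is a stand-in for xfrm_state; the AF_* values are the usual socket.h constants):

    #include <stdio.h>

    #define AF_UNSPEC 0
    #define AF_INET   2
    #define AF_INET6 10

    struct state_sketch {
            int sel_family;   /* selector family from the SADB address payload */
            int props_family; /* family the SA itself was keyed for */
    };

    int main(void)
    {
            struct state_sketch x = {
                    .sel_family = AF_UNSPEC,  /* left unset by userspace */
                    .props_family = AF_INET6,
            };

            /* Default the selector family only when it is still unset. */
            if (!x.sel_family)
                    x.sel_family = x.props_family;

            printf("sel.family = %d\n", x.sel_family); /* 10 */
            return 0;
    }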

View file

@@ -721,7 +721,17 @@ static void ieee80211_send_assoc(struct net_device *dev,
 			capab |= WLAN_CAPABILITY_PRIVACY;
 		if (bss->wmm_ie)
 			wmm = 1;
+
+		/* get all rates supported by the device and the AP as
+		 * some APs don't like getting a superset of their rates
+		 * in the association request (e.g. D-Link DAP 1353 in
+		 * b-only mode) */
+		rates_len = ieee80211_compatible_rates(bss, sband, &rates);
+
 		ieee80211_rx_bss_put(dev, bss);
+	} else {
+		rates = ~0;
+		rates_len = sband->n_bitrates;
 	}

 	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
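ieee80211_compatible_rates() reports the usable rate set as a bitmask over the band's bitrate table, which is why the no-BSS fallback can express "all rates" as rates = ~0. A standalone sketch of that intersection, with made-up rate tables (the kernel keeps the real ones in ieee80211_supported_band):

    #include <stdio.h>

    /* Device rates in units of 100 kb/s, mimicking ieee80211_rate.bitrate. */
    static const int dev_rates[] = { 10, 20, 55, 110, 60, 90, 120, 240 };
    static const int n_dev_rates = sizeof(dev_rates) / sizeof(dev_rates[0]);

    /* Build a mask with bit i set when dev_rates[i] is also AP-supported. */
    static unsigned long compatible_rates(const int *ap, int n_ap)
    {
            unsigned long mask = 0;
            for (int i = 0; i < n_dev_rates; i++)
                    for (int j = 0; j < n_ap; j++)
                            if (dev_rates[i] == ap[j])
                                    mask |= 1UL << i;
            return mask;
    }

    int main(void)
    {
            /* A b-only AP: 1, 2, 5.5 and 11 Mb/s. */
            const int ap_rates[] = { 10, 20, 55, 110 };

            unsigned long mask = compatible_rates(ap_rates, 4);
            printf("mask = 0x%lx\n", mask); /* 0xf: only the four b rates */

            /* With no known BSS the code falls back to "everything": */
            unsigned long all = ~0UL;
            printf("fallback covers rate 0? %d\n", (int)(all & 1)); /* 1 */
            return 0;
    }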
@@ -752,10 +762,7 @@ static void ieee80211_send_assoc(struct net_device *dev,
 	*pos++ = ifsta->ssid_len;
 	memcpy(pos, ifsta->ssid, ifsta->ssid_len);

-	/* all supported rates should be added here but some APs
-	 * (e.g. D-Link DAP 1353 in b-only mode) don't like that
-	 * Therefore only add rates the AP supports */
-	rates_len = ieee80211_compatible_rates(bss, sband, &rates);
+	/* add all rates which were marked to be used above */
 	supp_rates_len = rates_len;
 	if (supp_rates_len > 8)
 		supp_rates_len = 8;
@@ -3434,21 +3441,17 @@ static int ieee80211_sta_config_auth(struct net_device *dev,
 	struct ieee80211_sta_bss *bss, *selected = NULL;
 	int top_rssi = 0, freq;

-	if (!(ifsta->flags & (IEEE80211_STA_AUTO_SSID_SEL |
-	    IEEE80211_STA_AUTO_BSSID_SEL | IEEE80211_STA_AUTO_CHANNEL_SEL))) {
-		ifsta->state = IEEE80211_AUTHENTICATE;
-		ieee80211_sta_reset_auth(dev, ifsta);
-		return 0;
-	}
-
 	spin_lock_bh(&local->sta_bss_lock);
 	freq = local->oper_channel->center_freq;
 	list_for_each_entry(bss, &local->sta_bss_list, list) {
 		if (!(bss->capability & WLAN_CAPABILITY_ESS))
 			continue;

-		if (!!(bss->capability & WLAN_CAPABILITY_PRIVACY) ^
-		    !!sdata->default_key)
+		if ((ifsta->flags & (IEEE80211_STA_AUTO_SSID_SEL |
+		     IEEE80211_STA_AUTO_BSSID_SEL |
+		     IEEE80211_STA_AUTO_CHANNEL_SEL)) &&
+		    (!!(bss->capability & WLAN_CAPABILITY_PRIVACY) ^
+		     !!sdata->default_key))
 			continue;

 		if (!(ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL) &&

View file

@@ -389,6 +389,41 @@ void ieee80211_iterate_active_interfaces(
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct ieee80211_sub_if_data *sdata;

+	rtnl_lock();
+
+	list_for_each_entry(sdata, &local->interfaces, list) {
+		switch (sdata->vif.type) {
+		case IEEE80211_IF_TYPE_INVALID:
+		case IEEE80211_IF_TYPE_MNTR:
+		case IEEE80211_IF_TYPE_VLAN:
+			continue;
+		case IEEE80211_IF_TYPE_AP:
+		case IEEE80211_IF_TYPE_STA:
+		case IEEE80211_IF_TYPE_IBSS:
+		case IEEE80211_IF_TYPE_WDS:
+		case IEEE80211_IF_TYPE_MESH_POINT:
+			break;
+		}
+		if (sdata->dev == local->mdev)
+			continue;
+		if (netif_running(sdata->dev))
+			iterator(data, sdata->dev->dev_addr,
+				 &sdata->vif);
+	}
+
+	rtnl_unlock();
+}
+EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces);
+
+void ieee80211_iterate_active_interfaces_atomic(
+	struct ieee80211_hw *hw,
+	void (*iterator)(void *data, u8 *mac,
+			 struct ieee80211_vif *vif),
+	void *data)
+{
+	struct ieee80211_local *local = hw_to_local(hw);
+	struct ieee80211_sub_if_data *sdata;
+
 	rcu_read_lock();

 	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
@@ -413,4 +448,4 @@ void ieee80211_iterate_active_interfaces(
 	rcu_read_unlock();
 }

-EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces);
+EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
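The split leaves drivers with two entry points sharing one iterator signature: the original now walks the interface list under the RTNL and may sleep, while the new _atomic variant walks the RCU-protected list and is safe in atomic context. A compile-style sketch of driver usage (the driver function names and the counting purpose are invented; only the two mac80211 calls exported above are real):

    #include <net/mac80211.h>

    /* Hypothetical driver-side iterator. It must not sleep: the _atomic
     * variant may invoke it under rcu_read_lock() from atomic context. */
    static void drv_count_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
    {
            int *n_active = data;

            (*n_active)++;
    }

    /* From a timer or IRQ path, use the RCU-based variant ... */
    static void drv_check_activity(struct ieee80211_hw *hw)
    {
            int n_active = 0;

            ieee80211_iterate_active_interfaces_atomic(hw, drv_count_vif,
                                                       &n_active);
            /* ... whereas process context that may sleep would call
             * ieee80211_iterate_active_interfaces(), which takes the RTNL. */
    }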

View file

@@ -221,7 +221,6 @@ static int ieee80211_ioctl_giwrange(struct net_device *dev,
 	range->num_frequency = c;

 	IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
-	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWTHRSPY);
 	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
 	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);

View file

@@ -220,7 +220,7 @@ replay:
 			tp = kzalloc(sizeof(*tp), GFP_KERNEL);
 			if (tp == NULL)
 				goto errout;
-			err = -EINVAL;
+			err = -ENOENT;
 			tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND]);
 			if (tp_ops == NULL) {
 #ifdef CONFIG_KMOD

View file

@@ -50,19 +50,8 @@ static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)

 	switch (type) {
 	case XFRMA_ALG_AUTH:
-		if (!algp->alg_key_len &&
-		    strcmp(algp->alg_name, "digest_null") != 0)
-			return -EINVAL;
-		break;
-
 	case XFRMA_ALG_CRYPT:
-		if (!algp->alg_key_len &&
-		    strcmp(algp->alg_name, "cipher_null") != 0)
-			return -EINVAL;
-		break;
-
 	case XFRMA_ALG_COMP:
-		/* Zero length keys are legal. */
 		break;

 	default: