netdev: ethernet dev_alloc_skb to netdev_alloc_skb
Replaced the deprecated dev_alloc_skb() with netdev_alloc_skb() in
drivers/net/ethernet, and removed the now-redundant "skb->dev = dev"
assignments that followed, since netdev_alloc_skb() sets skb->dev itself.

Signed-off-by: Pradeep A Dalvi <netdev@pradeepdalvi.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c4062dfc42
commit dae2e9f430
33 changed files with 47 additions and 58 deletions
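The change is mechanical and repeats across all 33 files: switch the allocator, pass the owning net_device, and drop any manual skb->dev assignment. A minimal sketch of the pattern as a hypothetical RX refill helper (example_rx_refill and buf_sz are illustrative names, not code from this commit):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical refill helper; the 2-byte reserve mirrors the ring
 * refill loops below, not any one specific driver. */
static struct sk_buff *example_rx_refill(struct net_device *dev,
					 unsigned int buf_sz)
{
	/*
	 * Before this commit the call site looked like:
	 *	skb = dev_alloc_skb(buf_sz + 2);
	 *	skb->dev = dev;		(manual device assignment)
	 */
	struct sk_buff *skb = netdev_alloc_skb(dev, buf_sz + 2);

	if (skb == NULL)
		return NULL;	/* caller drops the packet or retries later */

	/* netdev_alloc_skb() has already set skb->dev = dev, which is
	 * why the explicit assignments are deleted throughout. */
	skb_reserve(skb, 2);	/* 16 byte align the IP header */
	return skb;
}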
@@ -150,7 +150,7 @@ static void netx_eth_receive(struct net_device *ndev)
 	seg = (val & FIFO_PTR_SEGMENT_MASK) >> FIFO_PTR_SEGMENT_SHIFT;
 	len = (val & FIFO_PTR_FRAMELEN_MASK) >> FIFO_PTR_FRAMELEN_SHIFT;
 
-	skb = dev_alloc_skb(len);
+	skb = netdev_alloc_skb(ndev, len);
 	if (unlikely(skb == NULL)) {
 		printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
 		       ndev->name);
@@ -735,7 +735,7 @@ static void netdev_rx(struct net_device *dev)
 
 	if (status & RXDS_RXGD) {
 		data = ether->rdesc->recv_buf[ether->cur_rx];
-		skb = dev_alloc_skb(length+2);
+		skb = netdev_alloc_skb(dev, length + 2);
 		if (!skb) {
 			dev_err(&pdev->dev, "get skb buffer error\n");
 			ether->stats.rx_dropped++;
@@ -1815,7 +1815,7 @@ static int nv_alloc_rx(struct net_device *dev)
 	less_rx = np->last_rx.orig;
 
 	while (np->put_rx.orig != less_rx) {
-		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
 		if (skb) {
 			np->put_rx_ctx->skb = skb;
 			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
@@ -1850,7 +1850,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 	less_rx = np->last_rx.ex;
 
 	while (np->put_rx.ex != less_rx) {
-		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
 		if (skb) {
 			np->put_rx_ctx->skb = skb;
 			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
@@ -4993,9 +4993,9 @@ static int nv_loopback_test(struct net_device *dev)
 
 	/* setup packet for tx */
 	pkt_len = ETH_DATA_LEN;
-	tx_skb = dev_alloc_skb(pkt_len);
+	tx_skb = netdev_alloc_skb(dev, pkt_len);
 	if (!tx_skb) {
-		netdev_err(dev, "dev_alloc_skb() failed during loopback test\n");
+		netdev_err(dev, "netdev_alloc_skb() failed during loopback test\n");
 		ret = 0;
 		goto out;
 	}
@@ -1188,11 +1188,10 @@ static void hamachi_init_ring(struct net_device *dev)
 	}
 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
+		struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);
 		hmp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
-		skb->dev = dev;		/* Mark as being used by this device. */
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 		hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
 			skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
@@ -1488,7 +1487,7 @@ static int hamachi_rx(struct net_device *dev)
 		/* Check if the packet is long enough to accept without copying
 		   to a minimally-sized skbuff. */
 		if (pkt_len < rx_copybreak &&
-		    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 #ifdef RX_CHECKSUM
 			printk(KERN_ERR "%s: rx_copybreak non-zero "
 			       "not good with RX_CHECKSUM\n", dev->name);
@@ -1591,12 +1590,11 @@ static int hamachi_rx(struct net_device *dev)
 		entry = hmp->dirty_rx % RX_RING_SIZE;
 		desc = &(hmp->rx_ring[entry]);
 		if (hmp->rx_skbuff[entry] == NULL) {
-			struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
+			struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);
 
 			hmp->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;		/* Better luck next round. */
-			skb->dev = dev;		/* Mark as being used by this device. */
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 			desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
 				skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
@@ -743,11 +743,10 @@ static int yellowfin_init_ring(struct net_device *dev)
 	}
 
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
+		struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
 		yp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
-		skb->dev = dev;		/* Mark as being used by this device. */
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 			skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
@@ -1133,7 +1132,7 @@ static int yellowfin_rx(struct net_device *dev)
 				 PCI_DMA_FROMDEVICE);
 			yp->rx_skbuff[entry] = NULL;
 		} else {
-			skb = dev_alloc_skb(pkt_len + 2);
+			skb = netdev_alloc_skb(dev, pkt_len + 2);
 			if (skb == NULL)
 				break;
 			skb_reserve(skb, 2);	/* 16 byte align the IP header */
@@ -1156,11 +1155,10 @@ static int yellowfin_rx(struct net_device *dev)
 	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
 		entry = yp->dirty_rx % RX_RING_SIZE;
 		if (yp->rx_skbuff[entry] == NULL) {
-			struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
+			struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
 			if (skb == NULL)
 				break;			/* Better luck next round. */
 			yp->rx_skbuff[entry] = skb;
-			skb->dev = dev;	/* Mark as being used by this device. */
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 			yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 				skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
@@ -643,7 +643,7 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
 		/* Entry in use? */
 		WARN_ON(*buff);
 
-		skb = dev_alloc_skb(mac->bufsz);
+		skb = netdev_alloc_skb(dev, mac->bufsz);
 		skb_reserve(skb, LOCAL_SKB_ALIGN);
 
 		if (unlikely(!skb))
@@ -1487,7 +1487,7 @@ netxen_alloc_rx_skb(struct netxen_adapter *adapter,
 	dma_addr_t dma;
 	struct pci_dev *pdev = adapter->pdev;
 
-	buffer->skb = dev_alloc_skb(rds_ring->skb_size);
+	buffer->skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
 	if (!buffer->skb)
 		return 1;
 
@@ -719,7 +719,7 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
 	int i, loop, cnt = 0;
 
 	for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
-		skb = dev_alloc_skb(QLCNIC_ILB_PKT_SIZE);
+		skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
 		qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
 		skb_put(skb, QLCNIC_ILB_PKT_SIZE);
 
@@ -1440,7 +1440,7 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
 	dma_addr_t dma;
 	struct pci_dev *pdev = adapter->pdev;
 
-	skb = dev_alloc_skb(rds_ring->skb_size);
+	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
 	if (!skb) {
 		adapter->stats.skb_alloc_failure++;
 		return -ENOMEM;
@@ -552,7 +552,7 @@ static void ni5010_rx(struct net_device *dev)
 	}
 
 	/* Malloc up new buffer. */
-	skb = dev_alloc_skb(i_pkt_size + 3);
+	skb = netdev_alloc_skb(dev, i_pkt_size + 3);
 	if (skb == NULL) {
 		printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
 		dev->stats.rx_dropped++;
@@ -783,7 +783,7 @@ static void net_rx(struct net_device *dev)
 		int pkt_len = (rx_head.rx_count & 0x7ff) - 4;
 		struct sk_buff *skb;
 
-		skb = dev_alloc_skb(pkt_len + 2);
+		skb = netdev_alloc_skb(dev, pkt_len + 2);
 		if (skb == NULL) {
 			printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n",
 			       dev->name);
@@ -653,13 +653,12 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		/* skb */
 		mdp->rx_skbuff[i] = NULL;
-		skb = dev_alloc_skb(mdp->rx_buf_sz);
+		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
 		mdp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
 		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
 			       DMA_FROM_DEVICE);
-		skb->dev = ndev;	/* Mark as being used by this device. */
 		sh_eth_set_receive_align(skb);
 
 		/* RX descriptor */
@@ -953,13 +952,12 @@ static int sh_eth_rx(struct net_device *ndev)
 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
 
 		if (mdp->rx_skbuff[entry] == NULL) {
-			skb = dev_alloc_skb(mdp->rx_buf_sz);
+			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
 			mdp->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;	/* Better luck next round. */
 			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
 				       DMA_FROM_DEVICE);
-			skb->dev = ndev;
 			sh_eth_set_receive_align(skb);
 
 			skb_checksum_none_assert(skb);
@@ -643,7 +643,7 @@ if (next_ptr < RX_START || next_ptr >= RX_END) {
 		if (next_ptr <= this_ptr)
 			length += RX_END - RX_START;
 
-		skb = dev_alloc_skb(length + 2);
+		skb = netdev_alloc_skb(dev, length + 2);
 		if (skb) {
 			unsigned char *buf;
 
@@ -548,7 +548,7 @@ static void seeq8005_rx(struct net_device *dev)
 		struct sk_buff *skb;
 		unsigned char *buf;
 
-		skb = dev_alloc_skb(pkt_len);
+		skb = netdev_alloc_skb(dev, pkt_len);
 		if (skb == NULL) {
 			printk("%s: Memory squeeze, dropping packet.\n", dev->name);
 			dev->stats.rx_dropped++;
@@ -1166,7 +1166,7 @@ sis900_init_rx_ring(struct net_device *net_dev)
 	for (i = 0; i < NUM_RX_DESC; i++) {
 		struct sk_buff *skb;
 
-		if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+		if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
 			/* not enough memory for skbuff, this makes a "hole"
 			   on the buffer ring, it is not clear how the
 			   hardware will react to this kind of degenerated
@@ -1769,7 +1769,7 @@ static int sis900_rx(struct net_device *net_dev)
 
 			/* refill the Rx buffer, what if there is not enough
 			 * memory for new socket buffer ?? */
-			if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+			if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
 				/*
 				 * Not enough memory to refill the buffer
 				 * so we need to recycle the old one so
@@ -1827,7 +1827,7 @@ refill_rx_ring:
 		entry = sis_priv->dirty_rx % NUM_RX_DESC;
 
 		if (sis_priv->rx_skbuff[entry] == NULL) {
-			if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+			if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
 				/* not enough memory for skbuff, this makes a
 				 * "hole" on the buffer ring, it is not clear
 				 * how the hardware will react to this kind
@@ -934,7 +934,7 @@ static void epic_init_ring(struct net_device *dev)
 
 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz + 2);
+		struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
 		ep->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
@@ -1199,7 +1199,7 @@ static int epic_rx(struct net_device *dev, int budget)
 		/* Check if the packet is long enough to accept without copying
 		   to a minimally-sized skbuff. */
 		if (pkt_len < rx_copybreak &&
-		    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 			skb_reserve(skb, 2);	/* 16 byte align the IP header */
 			pci_dma_sync_single_for_cpu(ep->pci_dev,
 						    ep->rx_ring[entry].bufaddr,
@@ -1232,7 +1232,7 @@ static int epic_rx(struct net_device *dev, int budget)
 		entry = ep->dirty_rx % RX_RING_SIZE;
 		if (ep->rx_skbuff[entry] == NULL) {
 			struct sk_buff *skb;
-			skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz + 2);
+			skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
 			if (skb == NULL)
 				break;
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
@@ -401,7 +401,7 @@ static inline void smc911x_rcv(struct net_device *dev)
 	} else {
 		/* Receive a valid packet */
 		/* Alloc a buffer with extra room for DMA alignment */
-		skb=dev_alloc_skb(pkt_len+32);
+		skb = netdev_alloc_skb(dev, pkt_len+32);
 		if (unlikely(skb == NULL)) {
 			PRINTK( "%s: Low memory, rcvd packet dropped.\n",
 				dev->name);
@@ -1222,7 +1222,7 @@ static void smc_rcv(struct net_device *dev)
 		if ( status & RS_MULTICAST )
 			dev->stats.multicast++;
 
-		skb = dev_alloc_skb( packet_length + 5);
+		skb = netdev_alloc_skb(dev, packet_length + 5);
 
 		if ( skb == NULL ) {
 			printk(KERN_NOTICE CARDNAME ": Low memory, packet dropped.\n");
@@ -1500,7 +1500,7 @@ static void smc_rx(struct net_device *dev)
 	struct sk_buff *skb;
 
 	/* Note: packet_length adds 5 or 6 extra bytes here! */
-	skb = dev_alloc_skb(packet_length+2);
+	skb = netdev_alloc_skb(dev, packet_length+2);
 
 	if (skb == NULL) {
 		pr_debug("%s: Low memory, packet dropped.\n", dev->name);
@@ -463,7 +463,7 @@ static inline void smc_rcv(struct net_device *dev)
 		 * multiple of 4 bytes on 32 bit buses.
 		 * Hence packet_len - 6 + 2 + 2 + 2.
 		 */
-		skb = dev_alloc_skb(packet_len);
+		skb = netdev_alloc_skb(dev, packet_len);
 		if (unlikely(skb == NULL)) {
 			printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
 			       dev->name);
@@ -850,8 +850,6 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index)
 		return -ENOMEM;
 	}
 
-	skb->dev = pd->dev;
-
 	mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb),
 				 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
 	if (pci_dma_mapping_error(pd->pdev, mapping)) {
@@ -1974,7 +1974,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 	else
 		alloclen = max(hlen, RX_COPY_MIN);
 
-	skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
+	skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
 	if (skb == NULL)
 		return -1;
 
@@ -853,7 +853,7 @@ static void bigmac_rx(struct bigmac *bp)
 			/* Trim the original skb for the netif. */
 			skb_trim(skb, len);
 		} else {
-			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
+			struct sk_buff *copy_skb = netdev_alloc_skb(bp->dev, len + 2);
 
 			if (copy_skb == NULL) {
 				drops++;
@@ -2043,7 +2043,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
 			/* Trim the original skb for the netif. */
 			skb_trim(skb, len);
 		} else {
-			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
+			struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);
 
 			if (copy_skb == NULL) {
 				drops++;
@@ -435,7 +435,7 @@ static void qe_rx(struct sunqe *qep)
 			dev->stats.rx_length_errors++;
 			dev->stats.rx_dropped++;
 		} else {
-			skb = dev_alloc_skb(len + 2);
+			skb = netdev_alloc_skb(dev, len + 2);
 			if (skb == NULL) {
 				drops++;
 				dev->stats.rx_dropped++;
@@ -1089,12 +1089,11 @@ static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
 	ENTER;
 	dno = bdx_rxdb_available(db) - 1;
 	while (dno > 0) {
-		skb = dev_alloc_skb(f->m.pktsz + NET_IP_ALIGN);
+		skb = netdev_alloc_skb(priv->ndev, f->m.pktsz + NET_IP_ALIGN);
 		if (!skb) {
-			pr_err("NO MEM: dev_alloc_skb failed\n");
+			pr_err("NO MEM: netdev_alloc_skb failed\n");
 			break;
 		}
-		skb->dev = priv->ndev;
 		skb_reserve(skb, NET_IP_ALIGN);
 
 		idx = bdx_rxdb_alloc_elem(db);
@@ -1258,7 +1257,7 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
 	skb = dm->skb;
 
 	if (len < BDX_COPYBREAK &&
-	    (skb2 = dev_alloc_skb(len + NET_IP_ALIGN))) {
+	    (skb2 = netdev_alloc_skb(priv->ndev, len + NET_IP_ALIGN))) {
 		skb_reserve(skb2, NET_IP_ALIGN);
 		/*skb_put(skb2, len); */
 		pci_dma_sync_single_for_cpu(priv->pdev,
@@ -992,10 +992,9 @@ static irqreturn_t emac_irq(int irq, void *dev_id)
 
 static struct sk_buff *emac_rx_alloc(struct emac_priv *priv)
 {
-	struct sk_buff *skb = dev_alloc_skb(priv->rx_buf_size);
+	struct sk_buff *skb = netdev_alloc_skb(priv->ndev, priv->rx_buf_size);
 	if (WARN_ON(!skb))
 		return NULL;
-	skb->dev = priv->ndev;
 	skb_reserve(skb, NET_IP_ALIGN);
 	return skb;
 }
@@ -419,7 +419,7 @@ static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
 #endif
 
 	/* Avoid "false sharing" with last cache line. */
-	/* ISSUE: This is already done by "dev_alloc_skb()". */
+	/* ISSUE: This is already done by "netdev_alloc_skb()". */
 	unsigned int len =
 		(((small ? LIPP_SMALL_PACKET_SIZE : large_size) +
 		  CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE());
@@ -433,7 +433,7 @@ static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
 	struct sk_buff **skb_ptr;
 
 	/* Request 96 extra bytes for alignment purposes. */
-	skb = dev_alloc_skb(len + padding);
+	skb = netdev_alloc_skb(info->napi->dev, len + padding);
 	if (skb == NULL)
 		return false;
 
@@ -453,7 +453,7 @@ static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
 					dma_addr_t *dma_handle)
 {
 	struct sk_buff *skb;
-	skb = dev_alloc_skb(RX_BUF_SIZE);
+	skb = netdev_alloc_skb(dev, RX_BUF_SIZE);
 	if (!skb)
 		return NULL;
 	*dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
@@ -1155,7 +1155,6 @@ static void alloc_rbufs(struct net_device *dev)
 		rp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
-		skb->dev = dev;		/* Mark as being used by this device. */
 
 		rp->rx_skbuff_dma[i] =
 			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
@@ -1940,7 +1939,6 @@ static int rhine_rx(struct net_device *dev, int limit)
 			rp->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;	/* Better luck next round. */
-			skb->dev = dev;	/* Mark as being used by this device. */
 			rp->rx_skbuff_dma[entry] =
 				pci_map_single(rp->pdev, skb->data,
 					       rp->rx_buf_sz,
@@ -1509,7 +1509,7 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	struct rx_desc *rd = &(vptr->rx.ring[idx]);
 	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
 
-	rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
+	rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx.buf_sz + 64);
 	if (rd_info->skb == NULL)
 		return -ENOMEM;
 
@@ -613,7 +613,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
 	u32 len;
 
 	len = ETH_FRAME_LEN + ETH_FCS_LEN;
-	skb = dev_alloc_skb(len + ALIGNMENT);
+	skb = netdev_alloc_skb(dev, len + ALIGNMENT);
 	if (!skb) {
 		/* Couldn't get memory. */
 		dev->stats.rx_dropped++;
@@ -1039,7 +1039,8 @@ xirc2ps_interrupt(int irq, void *dev_id)
 
 		pr_debug("rsr=%#02x packet_length=%u\n", rsr, pktlen);
 
-		skb = dev_alloc_skb(pktlen+3); /* 1 extra so we can use insw */
+		/* 1 extra so we can use insw */
+		skb = netdev_alloc_skb(dev, pktlen + 3);
 		if (!skb) {
 			pr_notice("low memory, packet dropped (size=%u)\n", pktlen);
 			dev->stats.rx_dropped++;
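Many of the converted call sites sit inside the long-standing rx_copybreak receive idiom (see the hamachi_rx, epic_rx and bdx_rx_receive hunks above). A minimal sketch of that idiom, assuming illustrative names and eliding the per-driver DMA sync/unmap handling:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical receive-completion path: short packets are copied into a
 * small fresh skb so the full-sized ring buffer stays in place; long
 * packets hand the ring buffer to the stack and are replaced on refill. */
static int example_rx_copybreak(struct net_device *dev,
				struct sk_buff *ring_skb,
				unsigned int pkt_len,
				unsigned int copybreak)
{
	struct sk_buff *skb;

	if (pkt_len < copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	/* 16 byte align the IP header */
		skb_copy_to_linear_data(skb, ring_skb->data, pkt_len);
		skb_put(skb, pkt_len);
	} else {
		skb = ring_skb;		/* pass the ring buffer up whole */
		skb_put(skb, pkt_len);
	}

	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}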