[NETPOLL]: Revert two bogus cleanups that broke netconsole.
Based upon a report by Andrew Morton and code analysis done by Jarek Poplawski. This reverts 33f807ba0d
("[NETPOLL]: Kill NETPOLL_RX_DROP, set but never tested.") and c7b6ea24b4
("[NETPOLL]: Don't need rx_flags."). The rx_flags did get tested for zero vs. non-zero and therefore we do need those tests and that code which sets NETPOLL_RX_DROP et al. Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
ec9b6add7d
commit
d9452e9f81
2 changed files with 12 additions and 7 deletions
|
@ -25,6 +25,7 @@ struct netpoll {
|
||||||
|
|
||||||
struct netpoll_info {
|
struct netpoll_info {
|
||||||
atomic_t refcnt;
|
atomic_t refcnt;
|
||||||
|
int rx_flags;
|
||||||
spinlock_t rx_lock;
|
spinlock_t rx_lock;
|
||||||
struct netpoll *rx_np; /* netpoll that registered an rx_hook */
|
struct netpoll *rx_np; /* netpoll that registered an rx_hook */
|
||||||
struct sk_buff_head arp_tx; /* list of arp requests to reply to */
|
struct sk_buff_head arp_tx; /* list of arp requests to reply to */
|
||||||
|
@ -50,12 +51,12 @@ static inline int netpoll_rx(struct sk_buff *skb)
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
if (!npinfo || !npinfo->rx_np)
|
if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
spin_lock_irqsave(&npinfo->rx_lock, flags);
|
spin_lock_irqsave(&npinfo->rx_lock, flags);
|
||||||
/* check rx_np again with the lock held */
|
/* check rx_flags again with the lock held */
|
||||||
if (npinfo->rx_np && __netpoll_rx(skb))
|
if (npinfo->rx_flags && __netpoll_rx(skb))
|
||||||
ret = 1;
|
ret = 1;
|
||||||
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
|
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
|
||||||
|
|
||||||
|
|
|
@ -39,6 +39,8 @@ static struct sk_buff_head skb_pool;
|
||||||
static atomic_t trapped;
|
static atomic_t trapped;
|
||||||
|
|
||||||
#define USEC_PER_POLL 50
|
#define USEC_PER_POLL 50
|
||||||
|
#define NETPOLL_RX_ENABLED 1
|
||||||
|
#define NETPOLL_RX_DROP 2
|
||||||
|
|
||||||
#define MAX_SKB_SIZE \
|
#define MAX_SKB_SIZE \
|
||||||
(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
|
(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
|
||||||
|
@ -126,11 +128,13 @@ static int poll_one_napi(struct netpoll_info *npinfo,
|
||||||
if (!test_bit(NAPI_STATE_SCHED, &napi->state))
|
if (!test_bit(NAPI_STATE_SCHED, &napi->state))
|
||||||
return budget;
|
return budget;
|
||||||
|
|
||||||
|
npinfo->rx_flags |= NETPOLL_RX_DROP;
|
||||||
atomic_inc(&trapped);
|
atomic_inc(&trapped);
|
||||||
|
|
||||||
work = napi->poll(napi, budget);
|
work = napi->poll(napi, budget);
|
||||||
|
|
||||||
atomic_dec(&trapped);
|
atomic_dec(&trapped);
|
||||||
|
npinfo->rx_flags &= ~NETPOLL_RX_DROP;
|
||||||
|
|
||||||
return budget - work;
|
return budget - work;
|
||||||
}
|
}
|
||||||
|
@ -472,7 +476,7 @@ int __netpoll_rx(struct sk_buff *skb)
|
||||||
if (skb->dev->type != ARPHRD_ETHER)
|
if (skb->dev->type != ARPHRD_ETHER)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
/* if receive ARP during middle of NAPI poll, then queue */
|
/* check if netpoll clients need ARP */
|
||||||
if (skb->protocol == htons(ETH_P_ARP) &&
|
if (skb->protocol == htons(ETH_P_ARP) &&
|
||||||
atomic_read(&trapped)) {
|
atomic_read(&trapped)) {
|
||||||
skb_queue_tail(&npi->arp_tx, skb);
|
skb_queue_tail(&npi->arp_tx, skb);
|
||||||
|
@ -534,9 +538,6 @@ int __netpoll_rx(struct sk_buff *skb)
|
||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
out:
|
out:
|
||||||
/* If packet received while already in poll then just
|
|
||||||
* silently drop.
|
|
||||||
*/
|
|
||||||
if (atomic_read(&trapped)) {
|
if (atomic_read(&trapped)) {
|
||||||
kfree_skb(skb);
|
kfree_skb(skb);
|
||||||
return 1;
|
return 1;
|
||||||
|
@ -675,6 +676,7 @@ int netpoll_setup(struct netpoll *np)
|
||||||
goto release;
|
goto release;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
npinfo->rx_flags = 0;
|
||||||
npinfo->rx_np = NULL;
|
npinfo->rx_np = NULL;
|
||||||
|
|
||||||
spin_lock_init(&npinfo->rx_lock);
|
spin_lock_init(&npinfo->rx_lock);
|
||||||
|
@ -756,6 +758,7 @@ int netpoll_setup(struct netpoll *np)
|
||||||
|
|
||||||
if (np->rx_hook) {
|
if (np->rx_hook) {
|
||||||
spin_lock_irqsave(&npinfo->rx_lock, flags);
|
spin_lock_irqsave(&npinfo->rx_lock, flags);
|
||||||
|
npinfo->rx_flags |= NETPOLL_RX_ENABLED;
|
||||||
npinfo->rx_np = np;
|
npinfo->rx_np = np;
|
||||||
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
|
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
|
||||||
}
|
}
|
||||||
|
@ -797,6 +800,7 @@ void netpoll_cleanup(struct netpoll *np)
|
||||||
if (npinfo->rx_np == np) {
|
if (npinfo->rx_np == np) {
|
||||||
spin_lock_irqsave(&npinfo->rx_lock, flags);
|
spin_lock_irqsave(&npinfo->rx_lock, flags);
|
||||||
npinfo->rx_np = NULL;
|
npinfo->rx_np = NULL;
|
||||||
|
npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
|
||||||
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
|
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue