netpoll: make __netpoll_cleanup non-block
As with the previous patch, slave_disable_netpoll() and __netpoll_cleanup() may also be called with read_lock() held, so we should make them non-blocking by moving the cleanup and kfree() into call_rcu_bh() callbacks. Cc: "David S. Miller" <davem@davemloft.net> Signed-off-by: Cong Wang <amwang@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
47be03a28c
commit
38e6bc185d
5 changed files with 39 additions and 24 deletions
|
|
@ -1257,9 +1257,7 @@ static inline void slave_disable_netpoll(struct slave *slave)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
slave->np = NULL;
|
slave->np = NULL;
|
||||||
synchronize_rcu_bh();
|
__netpoll_free_rcu(np);
|
||||||
__netpoll_cleanup(np);
|
|
||||||
kfree(np);
|
|
||||||
}
|
}
|
||||||
static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
|
static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
|
||||||
{
|
{
|
||||||
|
|
|
||||||
|
|
@ -23,6 +23,7 @@ struct netpoll {
|
||||||
u8 remote_mac[ETH_ALEN];
|
u8 remote_mac[ETH_ALEN];
|
||||||
|
|
||||||
struct list_head rx; /* rx_np list element */
|
struct list_head rx; /* rx_np list element */
|
||||||
|
struct rcu_head rcu;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct netpoll_info {
|
struct netpoll_info {
|
||||||
|
|
@ -38,6 +39,7 @@ struct netpoll_info {
|
||||||
struct delayed_work tx_work;
|
struct delayed_work tx_work;
|
||||||
|
|
||||||
struct netpoll *netpoll;
|
struct netpoll *netpoll;
|
||||||
|
struct rcu_head rcu;
|
||||||
};
|
};
|
||||||
|
|
||||||
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
|
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
|
||||||
|
|
@ -48,6 +50,7 @@ int netpoll_setup(struct netpoll *np);
|
||||||
int netpoll_trap(void);
|
int netpoll_trap(void);
|
||||||
void netpoll_set_trap(int trap);
|
void netpoll_set_trap(int trap);
|
||||||
void __netpoll_cleanup(struct netpoll *np);
|
void __netpoll_cleanup(struct netpoll *np);
|
||||||
|
void __netpoll_free_rcu(struct netpoll *np);
|
||||||
void netpoll_cleanup(struct netpoll *np);
|
void netpoll_cleanup(struct netpoll *np);
|
||||||
int __netpoll_rx(struct sk_buff *skb);
|
int __netpoll_rx(struct sk_buff *skb);
|
||||||
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
|
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
|
||||||
|
|
|
||||||
|
|
@ -704,11 +704,7 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
|
||||||
|
|
||||||
info->netpoll = NULL;
|
info->netpoll = NULL;
|
||||||
|
|
||||||
/* Wait for transmitting packets to finish before freeing. */
|
__netpoll_free_rcu(netpoll);
|
||||||
synchronize_rcu_bh();
|
|
||||||
|
|
||||||
__netpoll_cleanup(netpoll);
|
|
||||||
kfree(netpoll);
|
|
||||||
}
|
}
|
||||||
#endif /* CONFIG_NET_POLL_CONTROLLER */
|
#endif /* CONFIG_NET_POLL_CONTROLLER */
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -267,11 +267,7 @@ void br_netpoll_disable(struct net_bridge_port *p)
|
||||||
|
|
||||||
p->np = NULL;
|
p->np = NULL;
|
||||||
|
|
||||||
/* Wait for transmitting packets to finish before freeing. */
|
__netpoll_free_rcu(np);
|
||||||
synchronize_rcu_bh();
|
|
||||||
|
|
||||||
__netpoll_cleanup(np);
|
|
||||||
kfree(np);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
|
||||||
|
|
@ -878,6 +878,24 @@ static int __init netpoll_init(void)
|
||||||
}
|
}
|
||||||
core_initcall(netpoll_init);
|
core_initcall(netpoll_init);
|
||||||
|
|
||||||
|
/*
 * RCU-bh callback that finishes tearing down a netpoll_info once no
 * bottom-half RCU readers can still reference it.  Runs in softirq
 * context (see the comment below), so only non-blocking primitives
 * may be used here — notably no *_sync() cancellation.
 */
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
		container_of(rcu_head, struct netpoll_info, rcu);

	/* Drop any packets still queued for rx ARP handling and tx. */
	skb_queue_purge(&npinfo->arp_tx);
	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}
|
||||||
|
|
||||||
void __netpoll_cleanup(struct netpoll *np)
|
void __netpoll_cleanup(struct netpoll *np)
|
||||||
{
|
{
|
||||||
struct netpoll_info *npinfo;
|
struct netpoll_info *npinfo;
|
||||||
|
|
@ -903,21 +921,25 @@ void __netpoll_cleanup(struct netpoll *np)
|
||||||
ops->ndo_netpoll_cleanup(np->dev);
|
ops->ndo_netpoll_cleanup(np->dev);
|
||||||
|
|
||||||
RCU_INIT_POINTER(np->dev->npinfo, NULL);
|
RCU_INIT_POINTER(np->dev->npinfo, NULL);
|
||||||
|
call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
|
||||||
/* avoid racing with NAPI reading npinfo */
|
|
||||||
synchronize_rcu_bh();
|
|
||||||
|
|
||||||
skb_queue_purge(&npinfo->arp_tx);
|
|
||||||
skb_queue_purge(&npinfo->txq);
|
|
||||||
cancel_delayed_work_sync(&npinfo->tx_work);
|
|
||||||
|
|
||||||
/* clean after last, unfinished work */
|
|
||||||
__skb_queue_purge(&npinfo->txq);
|
|
||||||
kfree(npinfo);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
|
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
|
||||||
|
|
||||||
|
/*
 * RCU-bh callback: run the full netpoll cleanup and free the netpoll
 * struct once no bottom-half RCU readers can still reference it.
 * Scheduled by __netpoll_free_rcu().
 */
static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
{
	struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);

	__netpoll_cleanup(np);
	kfree(np);
}
|
||||||
|
|
||||||
|
/*
 * Non-blocking replacement for __netpoll_cleanup() + kfree(): defer the
 * teardown to an RCU-bh callback, so callers that hold read_lock() (per
 * the commit message: slave_disable_netpoll() and friends) never sleep.
 * Ownership of @np passes to rcu_cleanup_netpoll(), which frees it.
 */
void __netpoll_free_rcu(struct netpoll *np)
{
	call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
}
EXPORT_SYMBOL_GPL(__netpoll_free_rcu);
|
||||||
|
|
||||||
void netpoll_cleanup(struct netpoll *np)
|
void netpoll_cleanup(struct netpoll *np)
|
||||||
{
|
{
|
||||||
if (!np->dev)
|
if (!np->dev)
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue