net: add a limit parameter to sk_add_backlog()
sk_add_backlog() & sk_rcvqueues_full() hard-coded sk_rcvbuf as the memory
limit. We need to make this limit a parameter for TCP use.

No functional change is expected in this patch; all callers still use the
old sk_rcvbuf limit.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Tom Herbert <therbert@google.com>
Cc: Maciej Żenczykowski <maze@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Cc: Rick Jones <rick.jones2@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit f545a38f74
parent b98985073b
10 changed files with 21 additions and 19 deletions
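Context for the diff below: besides the header change shown, the other touched files convert their call sites to pass the old limit explicitly. A minimal sketch of that pattern, assuming a hypothetical protocol receive path (the example_* names are illustrative, not from the patch):

/* Sketch only: mirrors how callers in this patch were converted;
 * example_queue_rcv_skb() and example_backlog_rcv() are hypothetical.
 */
static int example_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/* Socket not owned by user context: process directly. */
		rc = example_backlog_rcv(sk, skb);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		/* Was sk_add_backlog(sk, skb); the limit is now explicit
		 * but unchanged, so behavior is identical.
		 */
		bh_unlock_sock(sk);
		kfree_skb(skb);
		return -ENOBUFS;
	}
	bh_unlock_sock(sk);
	return rc;
}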
include/net/sock.h
@@ -709,17 +709,19 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
  * Do not take into account this skb truesize,
  * to allow even a single big packet to come.
  */
-static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
+				     unsigned int limit)
 {
 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-	return qsize > sk->sk_rcvbuf;
+	return qsize > limit;
 }
 
 /* The per-socket spinlock must be held here. */
-static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
+					      unsigned int limit)
 {
-	if (sk_rcvqueues_full(sk, skb))
+	if (sk_rcvqueues_full(sk, skb, limit))
 		return -ENOBUFS;
 
 	__sk_add_backlog(sk, skb);
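The new parameter exists so that TCP can later pass a limit larger than sk->sk_rcvbuf. A sketch of what such a use might look like, assuming a follow-up change bounds the backlog by receive plus send buffer (that choice is an assumption here, not part of this patch):

/* Sketch of the intended TCP use (assumption: a follow-up picks
 * sk->sk_rcvbuf + sk->sk_sndbuf; this patch keeps every caller
 * at sk->sk_rcvbuf).
 */
if (sk_add_backlog(sk, skb, sk->sk_rcvbuf + sk->sk_sndbuf)) {
	bh_unlock_sock(sk);
	goto discard_and_relse;
}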