tcp: bool conversions
bool conversions where possible.

__inline__ -> inline

space cleanups

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e005d193d5
commit a2a385d627
9 changed files with 219 additions and 216 deletions
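The conversion is mechanical throughout: predicate-style helpers that returned 0/1 through an int now return bool, and 0/1 flag assignments become false/true. A minimal before/after sketch of the pattern (names are illustrative, not from the patch):

#include <stdbool.h>

/* Before: a truth value smuggled through an int. */
static inline int flag_is_set_old(unsigned int flags, unsigned int bit)
{
	return (flags & bit) != 0;
}

/* After: identical logic, but the bool return type documents the
 * contract and lets the compiler flag arithmetic on the result. */
static inline bool flag_is_set(unsigned int flags, unsigned int bit)
{
	return (flags & bit) != 0;
}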
net/ipv4/tcp.c
@@ -593,7 +593,7 @@ static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
 	tp->pushed_seq = tp->write_seq;
 }
 
-static inline int forced_push(const struct tcp_sock *tp)
+static inline bool forced_push(const struct tcp_sock *tp)
 {
 	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 }
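forced_push() reports whether the write sequence has advanced more than half of the peer's largest announced window past the last forced push; the test goes through after(), which must stay correct across 32-bit sequence wraparound. A self-contained sketch of that comparison, modeled on before()/after() from include/net/tcp.h:

#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe TCP sequence comparison: the signed 32-bit subtraction
 * keeps the ordering correct when sequence numbers wrap past 2^32. */
static inline bool seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

static inline bool seq_after(uint32_t seq2, uint32_t seq1)
{
	return seq_before(seq1, seq2);
}

With this definition seq_after(0x00000010, 0xfffffff0) is true: the small value lies just past the wrap point, not 4 GB behind.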
@@ -1082,7 +1082,7 @@ new_segment:
 			if (err)
 				goto do_fault;
 		} else {
-			int merge = 0;
+			bool merge = false;
 			int i = skb_shinfo(skb)->nr_frags;
 			struct page *page = sk->sk_sndmsg_page;
 			int off;
@@ -1096,7 +1096,7 @@ new_segment:
 			    off != PAGE_SIZE) {
 				/* We can extend the last page
 				 * fragment. */
-				merge = 1;
+				merge = true;
 			} else if (i == MAX_SKB_FRAGS || !sg) {
 				/* Need to add new fragment and cannot
 				 * do this because interface is non-SG,
@@ -1293,7 +1293,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
 void tcp_cleanup_rbuf(struct sock *sk, int copied)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	int time_to_ack = 0;
+	bool time_to_ack = false;
 
 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
@@ -1319,7 +1319,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 	      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
 	       !icsk->icsk_ack.pingpong)) &&
 	      !atomic_read(&sk->sk_rmem_alloc)))
-		time_to_ack = 1;
+		time_to_ack = true;
 	}
 
 	/* We send an ACK if we can now advertise a non-zero window
@@ -1341,7 +1341,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 			 * "Lots" means "at least twice" here.
 			 */
 			if (new_window && new_window >= 2 * rcv_window_now)
-				time_to_ack = 1;
+				time_to_ack = true;
 		}
 	}
 	if (time_to_ack)
@@ -2171,7 +2171,7 @@ EXPORT_SYMBOL(tcp_close);
 
 /* These states need RST on ABORT according to RFC793 */
 
-static inline int tcp_need_reset(int state)
+static inline bool tcp_need_reset(int state)
 {
 	return (1 << state) &
 	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
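tcp_need_reset() is built on a common kernel idiom: each TCP state maps to one bit of a TCPF_* mask, so membership in a set of states is a single shift and AND instead of a chain of comparisons (tcp_can_repair_sock() below uses the same trick). A sketch of the idiom; the enum values mirror the kernel's TCP_* numbering, but the mask is illustrative rather than the full set tested by the patch:

#include <stdbool.h>

enum tcp_state { TCP_ESTABLISHED = 1, TCP_FIN_WAIT1 = 4, TCP_CLOSE_WAIT = 8 };

/* Membership test: one AND instead of state == A || state == B || ... */
static inline bool state_in(int state, unsigned int mask)
{
	return (1 << state) & mask;
}

/* Usage, e.g. states needing an RST on abort (subset, for illustration):
 * state_in(s, (1 << TCP_ESTABLISHED) | (1 << TCP_CLOSE_WAIT) | (1 << TCP_FIN_WAIT1)) */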
@@ -2245,7 +2245,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 }
 EXPORT_SYMBOL(tcp_disconnect);
 
-static inline int tcp_can_repair_sock(struct sock *sk)
+static inline bool tcp_can_repair_sock(const struct sock *sk)
 {
 	return capable(CAP_NET_ADMIN) &&
 		((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
@@ -3172,13 +3172,13 @@ out_free:
 struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
 {
 	struct tcp_md5sig_pool __percpu *pool;
-	int alloc = 0;
+	bool alloc = false;
 
 retry:
 	spin_lock_bh(&tcp_md5sig_pool_lock);
 	pool = tcp_md5sig_pool;
 	if (tcp_md5sig_users++ == 0) {
-		alloc = 1;
+		alloc = true;
 		spin_unlock_bh(&tcp_md5sig_pool_lock);
 	} else if (!pool) {
 		tcp_md5sig_users--;
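The alloc flag in tcp_alloc_md5sig_pool() drives a take-the-lock, count-the-user, allocate-outside-the-lock shape: the first user drops the lock to perform the possibly sleeping allocation while racing callers back off and retry. A condensed userspace sketch of that shape, using a pthread mutex in place of the kernel spinlock (all names hypothetical; error unwinding omitted):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static void *pool;     /* shared resource, created on first use */
static int pool_users; /* reference count, protected by pool_lock */

static void *pool_get(void)
{
	bool alloc = false;
	void *p;

retry:
	pthread_mutex_lock(&pool_lock);
	if (pool_users++ == 0) {
		alloc = true;   /* first user: we perform the allocation */
		pthread_mutex_unlock(&pool_lock);
	} else if (!pool) {
		pool_users--;   /* allocator still running: back off */
		pthread_mutex_unlock(&pool_lock);
		goto retry;
	} else {
		pthread_mutex_unlock(&pool_lock);
	}

	if (alloc) {
		p = malloc(64); /* lock dropped, so this may block */
		pthread_mutex_lock(&pool_lock);
		if (p && !pool)
			pool = p;
		else
			free(p); /* lost a race, or allocation failed */
		pthread_mutex_unlock(&pool_lock);
	}
	return pool;
}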