/*
 * sk_wmem_free_skb - release a write-queue skb's memory accounting and
 * either recycle the skb into the per-socket TX cache or free it.
 *
 * @sk:  socket that owned @skb on its write queue
 * @skb: skb being released; ownership is consumed by this call
 *
 * The write-queue memory charge is dropped unconditionally (accounting
 * is released whether the skb is cached or freed):
 *  - SOCK_QUEUE_SHRUNK signals the queue got smaller,
 *  - sk_wmem_queued / sk_mem_uncharge give back @skb->truesize.
 *
 * If the one-slot TX cache is empty, the skb is parked there for cheap
 * reuse by the next transmit. skb_zcopy_clear() must run before caching
 * so any MSG_ZEROCOPY completion notification is delivered now rather
 * than being delayed for as long as the skb sits in the cache.
 * Otherwise the skb is freed outright.
 */
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued -= skb->truesize;
	sk_mem_uncharge(sk, skb->truesize);
	if (!sk->sk_tx_skb_cache) {
		/* Complete zerocopy notification before parking the skb. */
		skb_zcopy_clear(skb, true);
		sk->sk_tx_skb_cache = skb;
		return;
	}
	__kfree_skb(skb);
}
{
struct sk_buff *skb;
- skb = sk->sk_tx_skb_cache;
- if (skb && !size) {
- const struct sk_buff_fclones *fclones;
-
- fclones = container_of(skb, struct sk_buff_fclones, skb1);
- if (refcount_read(&fclones->fclone_ref) == 1) {
- sk->sk_wmem_queued -= skb->truesize;
- sk_mem_uncharge(sk, skb->truesize);
+ if (likely(!size)) {
+ skb = sk->sk_tx_skb_cache;
+ if (skb && !skb_cloned(skb)) {
skb->truesize -= skb->data_len;
sk->sk_tx_skb_cache = NULL;
pskb_trim(skb, 0);
tcp_rtx_queue_purge(sk);
skb = sk->sk_tx_skb_cache;
if (skb) {
- sk->sk_wmem_queued -= skb->truesize;
- sk_mem_uncharge(sk, skb->truesize);
__kfree_skb(skb);
sk->sk_tx_skb_cache = NULL;
}