From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Thomas Lamprecht <t.lamprecht@proxmox.com>
Date: Sat, 7 Jan 2023 13:50:22 +0100
Subject: [PATCH] Revert "gro: add support of (hw)gro packets to gro stack"

Seems to be the cause of a regression in network performance:
https://lore.kernel.org/netdev/CAK8fFZ5pzMaw3U1KXgC_OK4shKGsN=HDcR62cfPOuL0umXE1Ww@mail.gmail.com/

This reverts commit 5eddb24901ee49eee23c0bfce6af2e83fd5679bd.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
---
 net/core/gro.c         | 18 ++++--------------
 net/ipv4/tcp_offload.c | 17 ++---------------
 2 files changed, 6 insertions(+), 29 deletions(-)

diff --git a/net/core/gro.c b/net/core/gro.c
index bc9451743307..b4190eb08467 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -160,7 +160,6 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 	unsigned int gro_max_size;
 	unsigned int new_truesize;
 	struct sk_buff *lp;
-	int segs;
 
 	/* pairs with WRITE_ONCE() in netif_set_gro_max_size() */
 	gro_max_size = READ_ONCE(p->dev->gro_max_size);
@@ -176,7 +175,6 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 		return -E2BIG;
 	}
 
-	segs = NAPI_GRO_CB(skb)->count;
 	lp = NAPI_GRO_CB(p)->last;
 	pinfo = skb_shinfo(lp);
 
@@ -267,7 +265,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 	lp = p;
 
 done:
-	NAPI_GRO_CB(p)->count += segs;
+	NAPI_GRO_CB(p)->count++;
 	p->data_len += len;
 	p->truesize += delta_truesize;
 	p->len += len;
@@ -498,15 +496,8 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
				 sizeof(u32))); /* Avoid slow unaligned acc */
 	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
-	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
+	NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
 	NAPI_GRO_CB(skb)->is_atomic = 1;
-	NAPI_GRO_CB(skb)->count = 1;
-	if (unlikely(skb_is_gso(skb))) {
-		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
-		/* Only support TCP at the moment. */
-		if (!skb_is_gso_tcp(skb))
-			NAPI_GRO_CB(skb)->flush = 1;
-	}
 
 	/* Setup for GRO checksum validation */
 	switch (skb->ip_summed) {
@@ -554,10 +545,10 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	else
 		gro_list->count++;
 
+	NAPI_GRO_CB(skb)->count = 1;
 	NAPI_GRO_CB(skb)->age = jiffies;
 	NAPI_GRO_CB(skb)->last = skb;
-	if (!skb_is_gso(skb))
-		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
+	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
 	list_add(&skb->list, &gro_list->list);
 	ret = GRO_HELD;
 
@@ -669,7 +660,6 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 
 	skb->encapsulation = 0;
 	skb_shinfo(skb)->gso_type = 0;
-	skb_shinfo(skb)->gso_size = 0;
 	if (unlikely(skb->slow_gro)) {
 		skb_orphan(skb);
 		skb_ext_reset(skb);
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 45dda7889387..a844a0d38482 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -255,15 +255,7 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
 
 	mss = skb_shinfo(p)->gso_size;
 
-	/* If skb is a GRO packet, make sure its gso_size matches prior packet mss.
-	 * If it is a single frame, do not aggregate it if its length
-	 * is bigger than our mss.
-	 */
-	if (unlikely(skb_is_gso(skb)))
-		flush |= (mss != skb_shinfo(skb)->gso_size);
-	else
-		flush |= (len - 1) >= mss;
-
+	flush |= (len - 1) >= mss;
 	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
 #ifdef CONFIG_TLS_DEVICE
 	flush |= p->decrypted ^ skb->decrypted;
@@ -277,12 +269,7 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
 	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
 
 out_check_final:
-	/* Force a flush if last segment is smaller than mss. */
-	if (unlikely(skb_is_gso(skb)))
-		flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
-	else
-		flush = len < mss;
-
+	flush = len < mss;
 	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));