/*
 * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/prefetch.h>

#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
#include "trace.h"

static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, 0444);
MODULE_PARM_DESC(rtap_include_phy_info,
		 " Include PHY info in the radiotap header, default - no");

bool rx_align_2;
module_param(rx_align_2, bool, 0444);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");

bool rx_large_buf;
module_param(rx_large_buf, bool, 0444);
MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");

static inline uint wil_rx_snaplen(void)
{
	return rx_align_2 ? 6 : 0;
}
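
/* When rx_align_2 is set, Rx buffers start at a 4*n+2 address,
 * presumably so the IP header that follows the 14-byte Ethernet header
 * lands word-aligned (the usual NET_IP_ALIGN trick). In this mode the
 * HW also leaves a 6-byte SNAP header between the MAC addresses and the
 * ethertype; wil_rx_snaplen() reserves room for it, and
 * wil_vring_reap_rx() strips it before the frame goes up the stack.
 */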

static inline int wil_vring_is_empty(struct vring *vring)
{
	return vring->swhead == vring->swtail;
}

static inline u32 wil_vring_next_tail(struct vring *vring)
{
	return (vring->swtail + 1) % vring->size;
}

static inline void wil_vring_advance_head(struct vring *vring, int n)
{
	vring->swhead = (vring->swhead + n) % vring->size;
}

static inline int wil_vring_is_full(struct vring *vring)
{
	return wil_vring_next_tail(vring) == vring->swhead;
}

/* Used space in Tx Vring */
static inline int wil_vring_used_tx(struct vring *vring)
{
	u32 swhead = vring->swhead;
	u32 swtail = vring->swtail;
	return (vring->size + swhead - swtail) % vring->size;
}

/* Available space in Tx Vring */
static inline int wil_vring_avail_tx(struct vring *vring)
{
	return vring->size - wil_vring_used_tx(vring) - 1;
}
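
/* One slot is deliberately kept unused so that a full ring can be told
 * apart from an empty one: swhead == swtail means "empty", while "full"
 * is next_tail == swhead. E.g. with size 128 at most 127 descriptors
 * can be outstanding - hence the "- 1" in wil_vring_avail_tx().
 */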

/* wil_vring_wmark_low - low watermark for available descriptor space */
static inline int wil_vring_wmark_low(struct vring *vring)
{
	return vring->size/8;
}

/* wil_vring_wmark_high - high watermark for available descriptor space */
static inline int wil_vring_wmark_high(struct vring *vring)
{
	return vring->size/4;
}

/* returns true if num avail descriptors is lower than wmark_low */
static inline int wil_vring_avail_low(struct vring *vring)
{
	return wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring);
}

/* returns true if num avail descriptors is higher than wmark_high */
static inline int wil_vring_avail_high(struct vring *vring)
{
	return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring);
}
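
/* The two watermarks are presumably meant to be used as a hysteresis
 * pair: a queue stopped when available space falls below size/8 is only
 * restarted once it recovers above size/4, avoiding rapid stop/wake
 * flapping around a single threshold.
 */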

/* wil_val_in_range - check if value in [min,max) */
static inline bool wil_val_in_range(int val, int min, int max)
{
	return val >= min && val < max;
}

static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);
	uint i;

	wil_dbg_misc(wil, "vring_alloc:\n");

	BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

	vring->swhead = 0;
	vring->swtail = 0;
	vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
	if (!vring->ctx) {
		vring->va = NULL;
		return -ENOMEM;
	}

	/* vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent.
	 *
	 * HW has limitation that all vrings addresses must share the same
	 * upper 16 msb bits part of 48 bits address. To workaround that,
	 * if we are using 48 bit addresses switch to 32 bit allocation
	 * before allocating vring memory.
	 *
	 * There's no check for the return value of dma_set_mask_and_coherent,
	 * since we assume if we were able to set the mask during
	 * initialization in this system it will not fail if we set it again
	 */
	if (wil->use_extended_dma_addr)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
	if (!vring->va) {
		kfree(vring->ctx);
		vring->ctx = NULL;
		return -ENOMEM;
	}

	if (wil->use_extended_dma_addr)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));

	/* initially, all descriptors are SW owned
	 * For Tx and Rx, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < vring->size; i++) {
		volatile struct vring_tx_desc *_d = &vring->va[i].tx;

		_d->dma.status = TX_DMA_STATUS_DU;
	}

	wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
		     vring->va, &vring->pa, vring->ctx);

	return 0;
}

static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
			     struct wil_ctx *ctx)
{
	dma_addr_t pa = wil_desc_addr(&d->dma.addr);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}

static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
			   int tx)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);

	lockdep_assert_held(&wil->mutex);
	if (tx) {
		int vring_index = vring - wil->vring_tx;

		wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
			     vring_index, vring->size, vring->va,
			     &vring->pa, vring->ctx);
	} else {
		wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
			     vring->size, vring->va,
			     &vring->pa, vring->ctx);
	}

	while (!wil_vring_is_empty(vring)) {
		dma_addr_t pa;
		u16 dmalen;
		struct wil_ctx *ctx;

		if (tx) {
			struct vring_tx_desc dd, *d = &dd;
			volatile struct vring_tx_desc *_d =
					&vring->va[vring->swtail].tx;

			ctx = &vring->ctx[vring->swtail];
			if (!ctx) {
				wil_dbg_txrx(wil,
					     "ctx(%d) was already completed\n",
					     vring->swtail);
				vring->swtail = wil_vring_next_tail(vring);
				continue;
			}
			*d = *_d;
			wil_txdesc_unmap(dev, d, ctx);
			if (ctx->skb)
				dev_kfree_skb_any(ctx->skb);
			vring->swtail = wil_vring_next_tail(vring);
		} else { /* rx */
			struct vring_rx_desc dd, *d = &dd;
			volatile struct vring_rx_desc *_d =
					&vring->va[vring->swhead].rx;

			ctx = &vring->ctx[vring->swhead];
			*d = *_d;
			pa = wil_desc_addr(&d->dma.addr);
			dmalen = le16_to_cpu(d->dma.length);
			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
			kfree_skb(ctx->skb);
			wil_vring_advance_head(vring, 1);
		}
	}
	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
	kfree(vring->ctx);
	vring->pa = 0;
	vring->va = NULL;
	vring->ctx = NULL;
}
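
/* Note the pattern used above and throughout this file: a descriptor in
 * the DMA-coherent ring (the volatile '_d') is first copied wholesale
 * into an on-stack shadow ('dd'/'d') and only then examined, so each
 * descriptor is read from uncached memory once rather than once per
 * field access.
 */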

/**
 * Allocate one skb for Rx VRING
 *
 * Safe to call from IRQ
 */
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
			       u32 i, int headroom)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
	struct vring_rx_desc dd, *d = &dd;
	volatile struct vring_rx_desc *_d = &vring->va[i].rx;
	dma_addr_t pa;
	struct sk_buff *skb = dev_alloc_skb(sz + headroom);

	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, headroom);
	skb_put(skb, sz);

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
	wil_desc_addr_set(&d->dma.addr, pa);
	/* ip_length don't care */
	/* b11 don't care */
	/* error don't care */
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16(sz);
	*_d = *d;
	vring->ctx[i].skb = skb;

	return 0;
}

/**
 * Adds radiotap header
 *
 * Any error is indicated as "Bad FCS"
 *
 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
 *  - Rx descriptor: 32 bytes
 *  - Phy info
 */
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct wireless_dev *wdev = wil->wdev;
	struct wil6210_rtap {
		struct ieee80211_radiotap_header rthdr;
		/* fields should be in the order of bits in rthdr.it_present */
		/* flags */
		u8 flags;
		/* channel */
		__le16 chnl_freq __aligned(2);
		__le16 chnl_flags;
		/* MCS */
		u8 mcs_present;
		u8 mcs_flags;
		u8 mcs_index;
	} __packed;
	struct wil6210_rtap_vendor {
		struct wil6210_rtap rtap;
		/* vendor */
		u8 vendor_oui[3] __aligned(2);
		u8 vendor_ns;
		__le16 vendor_skip;
		u8 vendor_data[0];
	} __packed;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	struct wil6210_rtap_vendor *rtap_vendor;
	int rtap_len = sizeof(struct wil6210_rtap);
	int phy_length = 0; /* phy info header size, bytes */
	static char phy_data[128];
	struct ieee80211_channel *ch = wdev->preset_chandef.chan;

	if (rtap_include_phy_info) {
		rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
		/* calculate additional length */
		if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
			/**
			 * PHY info starts from 8-byte boundary
			 * there are 8-byte lines, last line may be partially
			 * written (HW bug), thus FW configures for last line
			 * to be excessive. Driver skips this last line.
			 */
			int len = min_t(int, 8 + sizeof(phy_data),
					wil_rxdesc_phy_length(d));

			if (len > 8) {
				void *p = skb_tail_pointer(skb);
				void *pa = PTR_ALIGN(p, 8);

				if (skb_tailroom(skb) >= len + (pa - p)) {
					phy_length = len - 8;
					memcpy(phy_data, pa, phy_length);
				}
			}
		}
		rtap_len += phy_length;
	}

	if (skb_headroom(skb) < rtap_len &&
	    pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
		wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
		return;
	}

	rtap_vendor = skb_push(skb, rtap_len);
	memset(rtap_vendor, 0, rtap_len);

	rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
	rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
	rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
			(1 << IEEE80211_RADIOTAP_FLAGS) |
			(1 << IEEE80211_RADIOTAP_CHANNEL) |
			(1 << IEEE80211_RADIOTAP_MCS));
	if (d->dma.status & RX_DMA_STATUS_ERROR)
		rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;

	rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
	rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);

	rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
	rtap_vendor->rtap.mcs_flags = 0;
	rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);

	if (rtap_include_phy_info) {
		rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
				IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
		/* OUI for Wilocity 04:ce:14 */
		rtap_vendor->vendor_oui[0] = 0x04;
		rtap_vendor->vendor_oui[1] = 0xce;
		rtap_vendor->vendor_oui[2] = 0x14;
		rtap_vendor->vendor_ns = 1;
		/* Rx descriptor + PHY data */
		rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
						       phy_length);
		memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
		memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
		       phy_length);
	}
}

/* similar to the ieee80211_ version, but FC contains only the 1st byte */
static inline int wil_is_back_req(u8 fc)
{
	return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
	       (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
}
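
/* The single-byte test works because the protocol version, type and
 * subtype bits of the 802.11 Frame Control field all sit in its first
 * octet: IEEE80211_FCTL_FTYPE is 0x000c, IEEE80211_FCTL_STYPE is
 * 0x00f0, and IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ == 0x84.
 */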

/**
 * reap 1 frame from @swhead
 *
 * Rx descriptor copied to skb->cb
 *
 * Safe to call from IRQ
 */
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
					 struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	struct net_device *ndev = wil_to_ndev(wil);
	volatile struct vring_rx_desc *_d;
	struct vring_rx_desc *d;
	struct sk_buff *skb;
	dma_addr_t pa;
	unsigned int snaplen = wil_rx_snaplen();
	unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen;
	u16 dmalen;
	u8 ftype;
	int cid;
	int i;
	struct wil_net_stats *stats;

	BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));

again:
	if (unlikely(wil_vring_is_empty(vring)))
		return NULL;

	i = (int)vring->swhead;
	_d = &vring->va[i].rx;
	if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
		/* it is not error, we just reached end of Rx done area */
		return NULL;
	}

	skb = vring->ctx[i].skb;
	vring->ctx[i].skb = NULL;
	wil_vring_advance_head(vring, 1);
	if (!skb) {
		wil_err(wil, "No Rx skb at [%d]\n", i);
		goto again;
	}
	d = wil_skb_rxdesc(skb);
	*d = *_d;
	pa = wil_desc_addr(&d->dma.addr);

	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(d->dma.length);

	trace_wil6210_rx(i, d);
	wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
	wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	cid = wil_rxdesc_cid(d);
	stats = &wil->sta[cid].stats;

	if (unlikely(dmalen > sz)) {
		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
		stats->rx_large_frame++;
		kfree_skb(skb);
		goto again;
	}
	skb_trim(skb, dmalen);

	prefetch(skb->data);

	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	stats->last_mcs_rx = wil_rxdesc_mcs(d);
	if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
		stats->rx_per_mcs[stats->last_mcs_rx]++;

	/* use radiotap header only if required */
	if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
		wil_rx_add_radiotap_header(wil, skb);

	/* no extra checks if in sniffer mode */
	if (ndev->type != ARPHRD_ETHER)
		return skb;
	/* Non-data frames may be delivered through Rx DMA channel (ex: BAR)
	 * Driver should recognize it by frame type, that is found
	 * in Rx descriptor. If type is not data, it is 802.11 frame as is
	 */
	ftype = wil_rxdesc_ftype(d) << 2;
	if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
		u8 fc1 = wil_rxdesc_fc1(d);
		int mid = wil_rxdesc_mid(d);
		int tid = wil_rxdesc_tid(d);
		u16 seq = wil_rxdesc_seq(d);

		wil_dbg_txrx(wil,
			     "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
			     fc1, mid, cid, tid, seq);
		stats->rx_non_data_frame++;
		if (wil_is_back_req(fc1)) {
			wil_dbg_txrx(wil,
				     "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
				     mid, cid, tid, seq);
			wil_rx_bar(wil, cid, tid, seq);
		} else {
			/* print again all info. One can enable only this
			 * without overhead for printing every Rx frame
			 */
			wil_dbg_txrx(wil,
				     "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
				     fc1, mid, cid, tid, seq);
			wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);
			wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
					  skb->data, skb_headlen(skb), false);
		}
		kfree_skb(skb);
		goto again;
	}

	if (unlikely(skb->len < ETH_HLEN + snaplen)) {
		wil_err(wil, "Short frame, len = %d\n", skb->len);
		stats->rx_short_frame++;
		kfree_skb(skb);
		goto again;
	}

	/* L4 IDENT is on when HW calculated checksum, check status
	 * and in case of error drop the packet
	 * higher stack layers will handle retransmission (if required)
	 */
	if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
		/* L4 protocol identified, csum calculated */
		if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* If HW reports bad checksum, let IP stack re-check it
		 * For example, HW doesn't understand Microsoft IP stack that
		 * mis-calculates TCP checksum - if it should be 0x0,
		 * it writes 0xffff in violation of RFC 1624
		 */
	}

	if (snaplen) {
		/* Packet layout
		 * +-------+-------+---------+------------+------+
		 * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
		 * +-------+-------+---------+------------+------+
		 * Need to remove SNAP, shifting SA and DA forward
		 */
		memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, snaplen);
	}

	return skb;
}

/**
 * allocate and fill up to @count buffers in rx ring
 * buffers posted at @swtail
 */
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	u32 next_tail;
	int rc = 0;
	int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
			WIL6210_RTAP_SIZE : 0;

	for (; next_tail = wil_vring_next_tail(v),
			(next_tail != v->swhead) && (count-- > 0);
			v->swtail = next_tail) {
		rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
		if (unlikely(rc)) {
			wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
				rc, v->swtail);
			break;
		}
	}

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, v->hwtail, v->swtail);

	return rc;
}

/**
 * reverse_memcmp - Compare two areas of memory, in reverse order
 * @cs: One area of memory
 * @ct: Another area of memory
 * @count: The size of the area.
 *
 * Cut'n'paste from original memcmp (see lib/string.c)
 * with minimal modifications
 */
static int reverse_memcmp(const void *cs, const void *ct, size_t count)
{
	const unsigned char *su1, *su2;
	int res = 0;

	for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
	     --su1, --su2, count--) {
		res = *su1 - *su2;
		if (res)
			break;
	}
	return res;
}

static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
{
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int cid = wil_rxdesc_cid(d);
	int tid = wil_rxdesc_tid(d);
	int key_id = wil_rxdesc_key_id(d);
	int mc = wil_rxdesc_mcast(d);
	struct wil_sta_info *s = &wil->sta[cid];
	struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
				      &s->tid_crypto_rx[tid];
	struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
	const u8 *pn = (u8 *)&d->mac.pn_15_0;

	if (!cc->key_set) {
		wil_err_ratelimited(wil,
				    "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
				    cid, tid, mc, key_id);
		return -EINVAL;
	}

	if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
		wil_err_ratelimited(wil,
				    "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
				    cid, tid, mc, key_id, pn, cc->pn);
		return -EINVAL;
	}
	memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);

	return 0;
}
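
/* reverse_memcmp() is used because the GCMP packet number appears to be
 * stored least-significant byte first (d->mac.pn_15_0 holds its low 16
 * bits), so a numeric comparison must start from the last byte. A PN
 * that is not strictly greater than the last one accepted for this key
 * marks the frame as a replay.
 */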

/*
 * Pass Rx packet to the netif. Update statistics.
 * Called in softirq context (NAPI poll).
 */
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
	gro_result_t rc = GRO_NORMAL;
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct wireless_dev *wdev = wil_to_wdev(wil);
	unsigned int len = skb->len;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
	int security = wil_rxdesc_security(d);
	struct ethhdr *eth = (void *)skb->data;
	/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
	 * is not suitable, need to look at data
	 */
	int mcast = is_multicast_ether_addr(eth->h_dest);
	struct wil_net_stats *stats = &wil->sta[cid].stats;
	struct sk_buff *xmit_skb = NULL;
	static const char * const gro_res_str[] = {
		[GRO_MERGED]		= "GRO_MERGED",
		[GRO_MERGED_FREE]	= "GRO_MERGED_FREE",
		[GRO_HELD]		= "GRO_HELD",
		[GRO_NORMAL]		= "GRO_NORMAL",
		[GRO_DROP]		= "GRO_DROP",
	};

	if (ndev->features & NETIF_F_RXHASH)
		/* fake L4 to ensure it won't be re-calculated later
		 * set hash to any non-zero value to activate rps
		 * mechanism, core will be chosen according
		 * to user-level rps configuration.
		 */
		skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);

	skb_orphan(skb);

	if (security && (wil_rx_crypto_check(wil, skb) != 0)) {
		rc = GRO_DROP;
		dev_kfree_skb(skb);
		stats->rx_replay++;
		goto stats;
	}

	if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
		if (mcast) {
			/* send multicast frames both to higher layers in
			 * local net stack and back to the wireless medium
			 */
			xmit_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			int xmit_cid = wil_find_cid(wil, eth->h_dest);

			if (xmit_cid >= 0) {
				/* The destination station is associated to
				 * this AP (in this VLAN), so send the frame
				 * directly to it and do not pass it to local
				 * net stack.
				 */
				xmit_skb = skb;
				skb = NULL;
			}
		}
	}
	if (xmit_skb) {
		/* Send to wireless media and increase priority by 256 to
		 * keep the received priority instead of reclassifying
		 * the frame (see cfg80211_classify8021d).
		 */
		xmit_skb->dev = ndev;
		xmit_skb->priority += 256;
		xmit_skb->protocol = htons(ETH_P_802_3);
		skb_reset_network_header(xmit_skb);
		skb_reset_mac_header(xmit_skb);
		wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
		dev_queue_xmit(xmit_skb);
	}

	if (skb) { /* deliver to local stack */
		skb->protocol = eth_type_trans(skb, ndev);
		rc = napi_gro_receive(&wil->napi_rx, skb);
		wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
			     len, gro_res_str[rc]);
	}
stats:
	/* statistics. rc set to GRO_NORMAL for AP bridging */
	if (unlikely(rc == GRO_DROP)) {
		ndev->stats.rx_dropped++;
		stats->rx_dropped++;
		wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
	} else {
		ndev->stats.rx_packets++;
		stats->rx_packets++;
		ndev->stats.rx_bytes += len;
		stats->rx_bytes += len;
		if (mcast)
			ndev->stats.multicast++;
	}
}

/**
 * Process all completed skb's from Rx VRING
 *
 * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
 */
void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	struct sk_buff *skb;

	if (unlikely(!v->va)) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "rx_handle\n");
	while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
		(*quota)--;

		if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
			skb->dev = ndev;
			skb_reset_mac_header(skb);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->pkt_type = PACKET_OTHERHOST;
			skb->protocol = htons(ETH_P_802_2);
			wil_netif_rx_any(skb, ndev);
		} else {
			wil_rx_reorder(wil, skb);
		}
	}
	wil_rx_refill(wil, v->size);
}

static void wil_rx_buf_len_init(struct wil6210_priv *wil)
{
	wil->rx_buf_len = rx_large_buf ?
		WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
	if (mtu_max > wil->rx_buf_len) {
		/* do not allow RX buffers to be smaller than mtu_max, for
		 * backward compatibility (mtu_max parameter was also used
		 * to support receiving large packets)
		 */
		wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max);
		wil->rx_buf_len = mtu_max;
	}
}
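
/* With rx_large_buf set, Rx buffers are sized for WIL_MAX_ETH_MTU (the
 * "8KB RX buffers" of the module parameter description); otherwise the
 * default is TXRX_BUF_LEN_DEFAULT minus the worst-case MPDU overhead,
 * with mtu_max still able to raise it as shown above.
 */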

int wil_rx_init(struct wil6210_priv *wil, u16 size)
{
	struct vring *vring = &wil->vring_rx;
	int rc;

	wil_dbg_misc(wil, "rx_init\n");

	if (vring->va) {
		wil_err(wil, "Rx ring already allocated\n");
		return -EINVAL;
	}

	wil_rx_buf_len_init(wil);

	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		return rc;

	rc = wmi_rx_chain_add(wil, vring);
	if (rc)
		goto err_free;

	rc = wil_rx_refill(wil, vring->size);
	if (rc)
		goto err_free;

	return 0;
err_free:
	wil_vring_free(wil, vring, 0);

	return rc;
}

void wil_rx_fini(struct wil6210_priv *wil)
{
	struct vring *vring = &wil->vring_rx;

	wil_dbg_misc(wil, "rx_fini\n");

	if (vring->va)
		wil_vring_free(wil, vring, 0);
}

static inline void wil_tx_data_init(struct vring_tx_data *txdata)
{
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = 0;
	txdata->enabled = 0;
	txdata->idle = 0;
	txdata->last_idle = 0;
	txdata->begin = 0;
	txdata->agg_wsize = 0;
	txdata->agg_timeout = 0;
	txdata->agg_amsdu = 0;
	txdata->addba_in_progress = false;
	spin_unlock_bh(&txdata->lock);
}

int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
		      int cid, int tid)
{
	int rc;
	struct wmi_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.cidxtid = mk_cidxtid(cid, tid),
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
			.mac_ctrl = 0,
			.to_resolution = 0,
			.agg_max_wsize = 0,
			.schd_params = {
				.priority = cpu_to_le16(0),
				.timeslot_us = cpu_to_le16(0xfff),
			},
		},
	};
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
	lockdep_assert_held(&wil->mutex);

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	wil_tx_data_init(txdata);
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->vring2cid_tid[id][0] = cid;
	wil->vring2cid_tid[id][1] = tid;

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	if (!wil->privacy)
		txdata->dot1x_open = true;
	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}

	spin_lock_bh(&txdata->lock);
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
	txdata->enabled = 1;
	spin_unlock_bh(&txdata->lock);

	if (txdata->dot1x_open && (agg_wsize >= 0))
		wil_addba_tx_request(wil, id, agg_wsize);

	return 0;
out_free:
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0;
	spin_unlock_bh(&txdata->lock);
	wil_vring_free(wil, vring, 1);
	wil->vring2cid_tid[id][0] = WIL6210_MAX_CID;
	wil->vring2cid_tid[id][1] = 0;

out:

	return rc;
}

int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
{
	int rc;
	struct wmi_bcast_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
		},
	};
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
	lockdep_assert_held(&wil->mutex);

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	wil_tx_data_init(txdata);
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
	wil->vring2cid_tid[id][1] = 0; /* TID */

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	if (!wil->privacy)
		txdata->dot1x_open = true;
	rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}

	spin_lock_bh(&txdata->lock);
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
	txdata->enabled = 1;
	spin_unlock_bh(&txdata->lock);

	return 0;
out_free:
	spin_lock_bh(&txdata->lock);
	txdata->enabled = 0;
	txdata->dot1x_open = false;
	spin_unlock_bh(&txdata->lock);
	wil_vring_free(wil, vring, 1);
out:

	return rc;
}

void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
{
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	lockdep_assert_held(&wil->mutex);

	if (!vring->va)
		return;

	wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);

	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0; /* no Tx can be in progress or start anew */
	spin_unlock_bh(&txdata->lock);
	/* napi_synchronize waits for completion of the current NAPI but will
	 * not prevent the next NAPI run.
	 * Add a memory barrier to guarantee that txdata->enabled is zeroed
	 * before napi_synchronize so that the next scheduled NAPI will not
	 * handle this vring
	 */
	wmb();
	/* make sure NAPI won't touch this vring */
	if (test_bit(wil_status_napi_en, wil->status))
		napi_synchronize(&wil->napi_tx);

	wil_vring_free(wil, vring, 1);
}

static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	int i;
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil_find_cid(wil, eth->h_dest);

	if (cid < 0)
		return NULL;

	/* TODO: fix for multiple TID */
	for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;
		if (wil->vring2cid_tid[i][0] == cid) {
			struct vring *v = &wil->vring_tx[i];
			struct vring_tx_data *txdata = &wil->vring_tx_data[i];

			wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
				     eth->h_dest, i);
			if (v->va && txdata->enabled) {
				return v;
			} else {
				wil_dbg_txrx(wil,
					     "find_tx_ucast: vring[%d] not valid\n",
					     i);
				return NULL;
			}
		}
	}

	return NULL;
}
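
/* The dot1x_open test above (repeated in the other vring-lookup
 * helpers) gates data Tx on the 802.1x handshake: until the port is
 * open, only EAPOL frames (ETH_P_PAE) are allowed onto a ring.
 */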

static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb);

static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
					   struct sk_buff *skb)
{
	struct vring *v;
	int i;
	u8 cid;
	struct vring_tx_data *txdata;

	/* In the STA mode, it is expected to have only 1 VRING
	 * for the AP we connected to.
	 * find 1-st vring eligible for this skb and use it.
	 */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->vring_tx[i];
		txdata = &wil->vring_tx_data[i];
		if (!v->va || !txdata->enabled)
			continue;

		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;

		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;

		wil_dbg_txrx(wil, "Tx -> ring %d\n", i);

		return v;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;
}

/* Use one of 2 strategies:
 *
 * 1. New (real broadcast):
 *    use dedicated broadcast vring
 * 2. Old (pseudo-DMS):
 *    Find 1-st vring and return it;
 *    duplicate skb and send it to other active vrings;
 *    in all cases override dest address to unicast peer's address
 * Use old strategy when new is not supported yet:
 *  - for PBSS
 */
static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
					 struct sk_buff *skb)
{
	struct vring *v;
	struct vring_tx_data *txdata;
	int i = wil->bcast_vring;

	if (i < 0)
		return NULL;
	v = &wil->vring_tx[i];
	txdata = &wil->vring_tx_data[i];
	if (!v->va || !txdata->enabled)
		return NULL;
	if (!wil->vring_tx_data[i].dot1x_open &&
	    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
		return NULL;

	return v;
}

static void wil_set_da_for_vring(struct wil6210_priv *wil,
				 struct sk_buff *skb, int vring_index)
{
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil->vring2cid_tid[vring_index][0];

	ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
}

static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
					 struct sk_buff *skb)
{
	struct vring *v, *v2;
	struct sk_buff *skb2;
	int i;
	u8 cid;
	struct ethhdr *eth = (void *)skb->data;
	char *src = eth->h_source;
	struct vring_tx_data *txdata;

	/* find 1-st vring eligible for data */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->vring_tx[i];
		txdata = &wil->vring_tx_data[i];
		if (!v->va || !txdata->enabled)
			continue;

		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;
		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;

		/* don't Tx back to source when re-routing Rx->Tx at the AP */
		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		goto found;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;

found:
	wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
	wil_set_da_for_vring(wil, skb, i);

	/* find other active vrings and duplicate skb for each */
	for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
		v2 = &wil->vring_tx[i];
		if (!v2->va)
			continue;
		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;
		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;

		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		skb2 = skb_copy(skb, GFP_ATOMIC);
		if (skb2) {
			wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
			wil_set_da_for_vring(wil, skb2, i);
			wil_tx_vring(wil, v2, skb2);
		} else {
			wil_err(wil, "skb_copy failed\n");
		}
	}

	return v;
}

static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
			   int vring_index)
{
	wil_desc_addr_set(&d->dma.addr, pa);
	d->dma.ip_length = 0;
	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
	d->dma.b11 = 0/*14 | BIT(7)*/;
	d->dma.error = 0;
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16((u16)len);
	d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
	d->mac.d[0] = 0;
	d->mac.d[1] = 0;
	d->mac.d[2] = 0;
	d->mac.ucode_cmd = 0;
	/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}

static inline
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
	d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}

/**
 * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
 * 2 - middle, 3 - last descriptor.
 */
static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
					  struct sk_buff *skb,
					  int tso_desc_type, bool is_ipv4,
					  int tcp_hdr_len, int skb_net_hdr_len)
{
	d->dma.b11 = ETH_HLEN; /* MAC header length */
	d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;

	d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
	/* L4 header len: TCP header length */
	d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);

	/* Setup TSO: bit and desc type */
	d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
		(tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
	d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);

	d->dma.ip_length = skb_net_hdr_len;
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
}

/**
 * Sets the descriptor @d up for csum. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6.
 * Note, if d==NULL, the function only returns the protocol result.
 *
 * It is very similar to previous wil_tx_desc_offload_setup_tso. This
 * is "if unrolling" to optimize the critical path.
 */
static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
				     struct sk_buff *skb)
{
	int protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	d->dma.b11 = ETH_HLEN; /* MAC header length */

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		protocol = ip_hdr(skb)->protocol;
		d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
		break;
	case cpu_to_be16(ETH_P_IPV6):
		protocol = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return -EINVAL;
	}

	switch (protocol) {
	case IPPROTO_TCP:
		d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
		/* L4 header len: TCP header length */
		d->dma.d0 |=
		(tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	case IPPROTO_UDP:
		/* L4 header len: UDP header length */
		d->dma.d0 |=
		(sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	default:
		return -EINVAL;
	}

	d->dma.ip_length = skb_network_header_len(skb);
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);

	return 0;
}
1349 | ||
3d4bde15 VK |
1350 | static inline void wil_tx_last_desc(struct vring_tx_desc *d) |
1351 | { | |
1352 | d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) | | |
1353 | BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) | | |
1354 | BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS); | |
1355 | } | |
1356 | ||
1357 | static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d) | |
1358 | { | |
1359 | d->dma.d0 |= wil_tso_type_lst << | |
1360 | DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS; | |
1361 | } | |
1362 | ||
1363 | static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring, | |
1364 | struct sk_buff *skb) | |
1365 | { | |
1366 | struct device *dev = wil_to_dev(wil); | |
1367 | ||
1368 | /* point to descriptors in shared memory */ | |
1369 | volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc, | |
1370 | *_first_desc = NULL; | |
1371 | ||
1372 | /* pointers to shadow descriptors */ | |
1373 | struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem, | |
1374 | *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem, | |
1375 | *first_desc = &first_desc_mem; | |
1376 | ||
1377 | /* pointer to shadow descriptors' context */ | |
1378 | struct wil_ctx *hdr_ctx, *first_ctx = NULL; | |
1379 | ||
1380 | int descs_used = 0; /* total number of used descriptors */ | |
1381 | int sg_desc_cnt = 0; /* number of descriptors for current mss*/ | |
1382 | ||
1383 | u32 swhead = vring->swhead; | |
1384 | int used, avail = wil_vring_avail_tx(vring); | |
1385 | int nr_frags = skb_shinfo(skb)->nr_frags; | |
1386 | int min_desc_required = nr_frags + 1; | |
1387 | int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */ | |
1388 | int f, len, hdrlen, headlen; | |
1389 | int vring_index = vring - wil->vring_tx; | |
1390 | struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index]; | |
1391 | uint i = swhead; | |
1392 | dma_addr_t pa; | |
1393 | const skb_frag_t *frag = NULL; | |
1394 | int rem_data = mss; | |
1395 | int lenmss; | |
1396 | int hdr_compensation_need = true; | |
1397 | int desc_tso_type = wil_tso_type_first; | |
1398 | bool is_ipv4; | |
1399 | int tcp_hdr_len; | |
1400 | int skb_net_hdr_len; | |
1401 | int gso_type; | |
e3d2ed94 | 1402 | int rc = -EINVAL; |
3d4bde15 | 1403 | |
af3db60a LA |
1404 | wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len, |
1405 | vring_index); | |
3d4bde15 VK |
1406 | |
1407 | if (unlikely(!txdata->enabled)) | |
1408 | return -EINVAL; | |
1409 | ||
1410 | /* A typical page 4K is 3-4 payloads, we assume each fragment | |
1411 | * is a full payload, that's how min_desc_required has been | |
1412 | * calculated. In real we might need more or less descriptors, | |
1413 | * this is the initial check only. | |
1414 | */ | |
1415 | if (unlikely(avail < min_desc_required)) { | |
1416 | wil_err_ratelimited(wil, | |
1417 | "TSO: Tx ring[%2d] full. No space for %d fragments\n", | |
1418 | vring_index, min_desc_required); | |
1419 | return -ENOMEM; | |
1420 | } | |
1421 | ||
1422 | /* Header Length = MAC header len + IP header len + TCP header len*/ | |
1423 | hdrlen = ETH_HLEN + | |
1424 | (int)skb_network_header_len(skb) + | |
1425 | tcp_hdrlen(skb); | |
1426 | ||
1427 | gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4); | |
1428 | switch (gso_type) { | |
1429 | case SKB_GSO_TCPV4: | |
1430 | /* TCP v4, zero out the IP length and IPv4 checksum fields | |
1431 | * as required by the offloading doc | |
1432 | */ | |
1433 | ip_hdr(skb)->tot_len = 0; | |
1434 | ip_hdr(skb)->check = 0; | |
1435 | is_ipv4 = true; | |
1436 | break; | |
1437 | case SKB_GSO_TCPV6: | |
1438 | /* TCP v6, zero out the payload length */ | |
1439 | ipv6_hdr(skb)->payload_len = 0; | |
1440 | is_ipv4 = false; | |
1441 | break; | |
1442 | default: | |
1443 | /* other than TCPv4 or TCPv6 types are not supported for TSO. | |
1444 | * It is also illegal for both to be set simultaneously | |
1445 | */ | |
1446 | return -EINVAL; | |
1447 | } | |
1448 | ||
1449 | if (skb->ip_summed != CHECKSUM_PARTIAL) | |
1450 | return -EINVAL; | |
1451 | ||
1452 | /* tcp header length and skb network header length are fixed for all | |
1453 | * packet's descriptors - read then once here | |
1454 | */ | |
1455 | tcp_hdr_len = tcp_hdrlen(skb); | |
1456 | skb_net_hdr_len = skb_network_header_len(skb); | |
1457 | ||
1458 | _hdr_desc = &vring->va[i].tx; | |
1459 | ||
1460 | pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE); | |
1461 | if (unlikely(dma_mapping_error(dev, pa))) { | |
1462 | wil_err(wil, "TSO: Skb head DMA map error\n"); | |
1463 | goto err_exit; | |
1464 | } | |
1465 | ||
1466 | wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index); | |
1467 | wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4, | |
1468 | tcp_hdr_len, skb_net_hdr_len); | |
1469 | wil_tx_last_desc(hdr_desc); | |
1470 | ||
1471 | vring->ctx[i].mapped_as = wil_mapped_as_single; | |
1472 | hdr_ctx = &vring->ctx[i]; | |
1473 | ||
1474 | descs_used++; | |
1475 | headlen = skb_headlen(skb) - hdrlen; | |
1476 | ||
1477 | for (f = headlen ? -1 : 0; f < nr_frags; f++) { | |
1478 | if (headlen) { | |
1479 | len = headlen; | |
1480 | wil_dbg_txrx(wil, "TSO: process skb head, len %u\n", | |
1481 | len); | |
1482 | } else { | |
1483 | frag = &skb_shinfo(skb)->frags[f]; | |
1484 | len = skb_frag_size(frag); | |
1485 | wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len); | |
1486 | } | |
1487 | ||
1488 | while (len) { | |
1489 | wil_dbg_txrx(wil, | |
1490 | "TSO: len %d, rem_data %d, descs_used %d\n", | |
1491 | len, rem_data, descs_used); | |
1492 | ||
1493 | if (descs_used == avail) { | |
e3d2ed94 HK |
1494 | wil_err_ratelimited(wil, "TSO: ring overflow\n"); |
1495 | rc = -ENOMEM; | |
1496 | goto mem_error; | |
3d4bde15 VK |
1497 | } |
1498 | ||
1499 | lenmss = min_t(int, rem_data, len); | |
1500 | i = (swhead + descs_used) % vring->size; | |
1501 | wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i); | |
1502 | ||
1503 | if (!headlen) { | |
1504 | pa = skb_frag_dma_map(dev, frag, | |
1505 | skb_frag_size(frag) - len, lenmss, | |
1506 | DMA_TO_DEVICE); | |
1507 | vring->ctx[i].mapped_as = wil_mapped_as_page; | |
1508 | } else { | |
1509 | pa = dma_map_single(dev, | |
1510 | skb->data + | |
1511 | skb_headlen(skb) - headlen, | |
1512 | lenmss, | |
1513 | DMA_TO_DEVICE); | |
1514 | vring->ctx[i].mapped_as = wil_mapped_as_single; | |
1515 | headlen -= lenmss; | |
1516 | } | |
1517 | ||
e3d2ed94 HK |
1518 | if (unlikely(dma_mapping_error(dev, pa))) { |
1519 | wil_err(wil, "TSO: DMA map page error\n"); | |
1520 | goto mem_error; | |
1521 | } | |
3d4bde15 VK |
1522 | |
1523 | _desc = &vring->va[i].tx; | |
1524 | ||
1525 | if (!_first_desc) { | |
1526 | _first_desc = _desc; | |
1527 | first_ctx = &vring->ctx[i]; | |
1528 | d = first_desc; | |
1529 | } else { | |
1530 | d = &desc_mem; | |
1531 | } | |
1532 | ||
1533 | wil_tx_desc_map(d, pa, lenmss, vring_index); | |
1534 | wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type, | |
1535 | is_ipv4, tcp_hdr_len, | |
1536 | skb_net_hdr_len); | |
1537 | ||
1538 | /* use tso_type_first only once */ | |
1539 | desc_tso_type = wil_tso_type_mid; | |
1540 | ||
1541 | descs_used++; /* desc used so far */ | |
1542 | sg_desc_cnt++; /* desc used for this segment */ | |
1543 | len -= lenmss; | |
1544 | rem_data -= lenmss; | |
1545 | ||
1546 | wil_dbg_txrx(wil, | |
1547 | "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n", | |
1548 | len, rem_data, descs_used, sg_desc_cnt); | |
1549 | ||
1550 | /* Close the segment if we reached mss size or the last frag */ | |
1551 | if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) { | |
1552 | if (hdr_compensation_need) { | |
1553 | /* first segment includes the hdr desc for | |
1554 | * release | |
1555 | */ | |
1556 | hdr_ctx->nr_frags = sg_desc_cnt; | |
1557 | wil_tx_desc_set_nr_frags(first_desc, | |
1558 | sg_desc_cnt + | |
1559 | 1); | |
1560 | hdr_compensation_need = false; | |
1561 | } else { | |
1562 | wil_tx_desc_set_nr_frags(first_desc, | |
1563 | sg_desc_cnt); | |
1564 | } | |
1565 | first_ctx->nr_frags = sg_desc_cnt - 1; | |
1566 | ||
1567 | wil_tx_last_desc(d); | |
1568 | ||
1569 | /* first descriptor may also be the last | |
1570 | * for this mss - make sure not to copy | |
1571 | * it twice | |
1572 | */ | |
1573 | if (first_desc != d) | |
1574 | *_first_desc = *first_desc; | |
1575 | ||
1576 | /* last descriptor will be copied at the end | |
1577 | * of this TSO processing | |
1578 | */ | |
1579 | if (f < nr_frags - 1 || len > 0) | |
1580 | *_desc = *d; | |
1581 | ||
1582 | rem_data = mss; | |
1583 | _first_desc = NULL; | |
1584 | sg_desc_cnt = 0; | |
1585 | } else if (first_desc != d) /* update mid descriptor */ | |
1586 | *_desc = *d; | |
1587 | } | |
1588 | } | |
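/* The ring now holds the hdr descriptor followed by a run of data | |
 * descriptors per MSS segment; each stored ctx->nr_frags tells the | |
 * completion path how many descriptors ahead to look for the DU bit | |
 * (see wil_tx_complete() below). | |
 */ | |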
1589 | ||
1590 | /* first descriptor may also be the last. | |
1591 | * in this case d pointer is invalid | |
1592 | */ | |
1593 | if (_first_desc == _desc) | |
1594 | d = first_desc; | |
1595 | ||
1596 | /* Last data descriptor */ | |
1597 | wil_set_tx_desc_last_tso(d); | |
1598 | *_desc = *d; | |
1599 | ||
1600 | /* Fill the total number of descriptors in first desc (hdr)*/ | |
1601 | wil_tx_desc_set_nr_frags(hdr_desc, descs_used); | |
1602 | *_hdr_desc = *hdr_desc; | |
1603 | ||
1604 | /* hold reference to skb | |
1605 | * to prevent skb release before accounting | |
1606 | * in case of immediate "tx done" | |
1607 | */ | |
1608 | vring->ctx[i].skb = skb_get(skb); | |
1609 | ||
1610 | /* performance monitoring */ | |
1611 | used = wil_vring_used_tx(vring); | |
1612 | if (wil_val_in_range(vring_idle_trsh, | |
1613 | used, used + descs_used)) { | |
1614 | txdata->idle += get_cycles() - txdata->last_idle; | |
1615 | wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", | |
1616 | vring_index, used, used + descs_used); | |
1617 | } | |
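/* close the idle interval: this packet has presumably pushed the | |
 * used count across vring_idle_trsh, with wil_val_in_range() read | |
 * here as testing threshold membership in [used, used + descs_used) | |
 */ | |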
1618 | ||
eb26cff1 ME |
1619 | /* Make sure to advance the head only after descriptor update is done. |
1620 | * This will prevent a race condition where the completion thread | |
1621 | * will see the DU bit set from previous run and will handle the | |
1622 | * skb before it was completed. | |
1623 | */ | |
1624 | wmb(); | |
1625 | ||
3d4bde15 | 1626 | /* advance swhead */ |
3d4bde15 | 1627 | wil_vring_advance_head(vring, descs_used); |
e3d2ed94 | 1628 | wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead); |
3d4bde15 VK |
1629 | |
1630 | /* make sure all writes to descriptors (shared memory) are done before | |
1631 | * committing them to HW | |
1632 | */ | |
1633 | wmb(); | |
1634 | ||
b9eeb512 | 1635 | wil_w(wil, vring->hwtail, vring->swhead); |
3d4bde15 VK |
1636 | return 0; |
1637 | ||
e3d2ed94 | 1638 | mem_error: |
3d4bde15 VK |
1639 | while (descs_used > 0) { |
1640 | struct wil_ctx *ctx; | |
1641 | ||
a1526f7e | 1642 | i = (swhead + descs_used - 1) % vring->size; |
3d4bde15 VK |
1643 | _desc = &vring->va[i].tx; | |
1644 | d = &desc_mem; /* snapshot the shared descriptor into a local copy */ | |
1645 | *d = *_desc; | |
1646 | _desc->dma.status = TX_DMA_STATUS_DU; | |
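/* DU set by design: hardware will not try to process this | |
 * descriptor (same convention as noted in wil_tx_complete()) | |
 */ | |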
1647 | ctx = &vring->ctx[i]; | |
1648 | wil_txdesc_unmap(dev, d, ctx); | |
3d4bde15 VK |
1649 | memset(ctx, 0, sizeof(*ctx)); |
1650 | descs_used--; | |
1651 | } | |
3d4bde15 | 1652 | err_exit: |
e3d2ed94 | 1653 | return rc; |
3d4bde15 VK |
1654 | } |
1655 | ||
5933a06d VK |
1656 | static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, |
1657 | struct sk_buff *skb) | |
2be7d22f VK |
1658 | { |
1659 | struct device *dev = wil_to_dev(wil); | |
68ada71e VK |
1660 | struct vring_tx_desc dd, *d = &dd; | |
1661 | volatile struct vring_tx_desc *_d; | |
2be7d22f VK |
1662 | u32 swhead = vring->swhead; |
1663 | int avail = wil_vring_avail_tx(vring); | |
1664 | int nr_frags = skb_shinfo(skb)->nr_frags; | |
504937d4 | 1665 | uint f = 0; |
2be7d22f | 1666 | int vring_index = vring - wil->vring_tx; |
7c0acf86 | 1667 | struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index]; |
2be7d22f VK |
1668 | uint i = swhead; |
1669 | dma_addr_t pa; | |
0436fd9a | 1670 | int used; |
41d6b093 VK |
1671 | bool mcast = (vring_index == wil->bcast_vring); |
1672 | uint len = skb_headlen(skb); | |
2be7d22f | 1673 | |
af3db60a LA |
1674 | wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len, |
1675 | vring_index); | |
2be7d22f | 1676 | |
5933a06d VK |
1677 | if (unlikely(!txdata->enabled)) |
1678 | return -EINVAL; | |
1679 | ||
33c477fd | 1680 | if (unlikely(avail < 1 + nr_frags)) { |
70801e1b | 1681 | wil_err_ratelimited(wil, |
5b29c573 VK |
1682 | "Tx ring[%2d] full. No space for %d fragments\n", |
1683 | vring_index, 1 + nr_frags); | |
2be7d22f VK |
1684 | return -ENOMEM; |
1685 | } | |
8fe59627 | 1686 | _d = &vring->va[i].tx; |
2be7d22f | 1687 | |
8fe59627 | 1688 | pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); |
2be7d22f | 1689 | |
5b29c573 VK |
1690 | wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index, |
1691 | skb_headlen(skb), skb->data, &pa); | |
7743882d | 1692 | wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1, |
2be7d22f VK |
1693 | skb->data, skb_headlen(skb), false); |
1694 | ||
1695 | if (unlikely(dma_mapping_error(dev, pa))) | |
1696 | return -EINVAL; | |
2232abd5 | 1697 | vring->ctx[i].mapped_as = wil_mapped_as_single; |
2be7d22f | 1698 | /* 1-st segment */ |
41d6b093 VK |
1699 | wil_tx_desc_map(d, pa, len, vring_index); |
1700 | if (unlikely(mcast)) { | |
1701 | d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */ | |
230d8442 | 1702 | if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */ |
41d6b093 | 1703 | d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS); |
41d6b093 | 1704 | } |
504937d4 | 1705 | /* Process TCP/UDP checksum offloading */ |
3d4bde15 | 1706 | if (unlikely(wil_tx_desc_offload_setup(d, skb))) { |
5b29c573 | 1707 | wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n", |
504937d4 KE |
1708 | vring_index); |
1709 | goto dma_error; | |
1710 | } | |
1711 | ||
c236658f | 1712 | vring->ctx[i].nr_frags = nr_frags; |
3d4bde15 | 1713 | wil_tx_desc_set_nr_frags(d, nr_frags + 1); |
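/* note the asymmetry: ctx->nr_frags counts only the fragments still | |
 * to come, while the descriptor's nr_frags field counts all pieces | |
 * of the skb including this head segment | |
 */ | |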
68ada71e | 1714 | |
2be7d22f | 1715 | /* middle segments */ |
504937d4 | 1716 | for (; f < nr_frags; f++) { |
2be7d22f VK |
1717 | const struct skb_frag_struct *frag = |
1718 | &skb_shinfo(skb)->frags[f]; | |
1719 | int len = skb_frag_size(frag); | |
8fe59627 | 1720 | |
e59d16c0 | 1721 | *_d = *d; |
5b29c573 VK |
1722 | wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i); |
1723 | wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4, | |
1724 | (const void *)d, sizeof(*d), false); | |
2be7d22f | 1725 | i = (swhead + f + 1) % vring->size; |
8fe59627 | 1726 | _d = &vring->va[i].tx; |
2be7d22f | 1727 | pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), |
8fe59627 | 1728 | DMA_TO_DEVICE); |
e3d2ed94 HK |
1729 | if (unlikely(dma_mapping_error(dev, pa))) { |
1730 | wil_err(wil, "Tx[%2d] failed to map fragment\n", | |
1731 | vring_index); | |
2be7d22f | 1732 | goto dma_error; |
e3d2ed94 | 1733 | } |
2232abd5 | 1734 | vring->ctx[i].mapped_as = wil_mapped_as_page; |
99b55bd2 | 1735 | wil_tx_desc_map(d, pa, len, vring_index); |
c236658f VK |
1736 | /* no need to check return code - |
1737 | * if it succeeded for 1-st descriptor, | |
1738 | * it will succeed here too | |
1739 | */ | |
3d4bde15 | 1740 | wil_tx_desc_offload_setup(d, skb); |
2be7d22f VK |
1741 | } |
1742 | /* for the last seg only */ | |
1743 | d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS); | |
668b2bbd | 1744 | d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS); |
2be7d22f | 1745 | d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS); |
68ada71e | 1746 | *_d = *d; |
5b29c573 VK |
1747 | wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i); |
1748 | wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4, | |
1749 | (const void *)d, sizeof(*d), false); | |
2be7d22f | 1750 | |
6cdadd4d VK |
1751 | /* hold reference to skb |
1752 | * to prevent skb release before accounting | |
1753 | * in case of immediate "tx done" | |
1754 | */ | |
1755 | vring->ctx[i].skb = skb_get(skb); | |
1756 | ||
0436fd9a VS |
1757 | /* performance monitoring */ |
1758 | used = wil_vring_used_tx(vring); | |
1759 | if (wil_val_in_range(vring_idle_trsh, | |
1760 | used, used + nr_frags + 1)) { | |
7c0acf86 | 1761 | txdata->idle += get_cycles() - txdata->last_idle; |
0436fd9a VS |
1762 | wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", |
1763 | vring_index, used, used + nr_frags + 1); | |
1764 | } | |
7c0acf86 | 1765 | |
eb26cff1 ME |
1766 | /* Make sure to advance the head only after descriptor update is done. |
1767 | * This will prevent a race condition where the completion thread | |
1768 | * will see the DU bit set from previous run and will handle the | |
1769 | * skb before it was completed. | |
1770 | */ | |
1771 | wmb(); | |
1772 | ||
2be7d22f VK |
1773 | /* advance swhead */ |
1774 | wil_vring_advance_head(vring, nr_frags + 1); | |
5b29c573 VK |
1775 | wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead, |
1776 | vring->swhead); | |
98658095 | 1777 | trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags); |
3d4bde15 VK |
1778 | |
1779 | /* make sure all writes to descriptors (shared memory) are done before | |
1780 | * committing them to HW | |
1781 | */ | |
1782 | wmb(); | |
1783 | ||
b9eeb512 | 1784 | wil_w(wil, vring->hwtail, vring->swhead); |
2be7d22f VK |
1785 | |
1786 | return 0; | |
1787 | dma_error: | |
1788 | /* unmap what we have mapped */ | |
c2a146f6 VK |
1789 | nr_frags = f + 1; /* frags mapped + one for skb head */ |
1790 | for (f = 0; f < nr_frags; f++) { | |
c2a146f6 | 1791 | struct wil_ctx *ctx; |
7e594444 | 1792 | |
2be7d22f | 1793 | i = (swhead + f) % vring->size; |
c2a146f6 | 1794 | ctx = &vring->ctx[i]; |
8fe59627 | 1795 | _d = &vring->va[i].tx; |
68ada71e VK |
1796 | *d = *_d; |
1797 | _d->dma.status = TX_DMA_STATUS_DU; | |
2232abd5 | 1798 | wil_txdesc_unmap(dev, d, ctx); |
f88f113a | 1799 | |
f88f113a | 1800 | memset(ctx, 0, sizeof(*ctx)); |
2be7d22f VK |
1801 | } |
1802 | ||
1803 | return -EINVAL; | |
1804 | } | |
1805 | ||
5933a06d VK |
1806 | static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, |
1807 | struct sk_buff *skb) | |
1808 | { | |
1809 | int vring_index = vring - wil->vring_tx; | |
1810 | struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index]; | |
1811 | int rc; | |
1812 | ||
1813 | spin_lock(&txdata->lock); | |
3d4bde15 VK |
1814 | |
1815 | rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring) | |
1816 | (wil, vring, skb); | |
1817 | ||
5933a06d | 1818 | spin_unlock(&txdata->lock); |
3d4bde15 | 1819 | |
5933a06d VK |
1820 | return rc; |
1821 | } | |
1822 | ||
f9e3033f DL |
1823 | /** |
1824 | * Check status of tx vrings and stop/wake net queues if needed | |
1825 | * | |
1826 | * This function does one of two checks: | |
1827 | * In case check_stop is true, will check if net queues need to be stopped. If | |
1828 | * the conditions for stopping are met, netif_tx_stop_all_queues() is called. | |
1829 | * In case check_stop is false, will check if net queues need to be woken. If | |
1830 | * the conditions for waking are met, netif_tx_wake_all_queues() is called. | |
1831 | * vring is the vring which is currently being modified by either adding | |
1832 | * descriptors (tx) into it or removing descriptors (tx complete) from it. Can | |
1833 | * be NULL when irrelevant (e.g. connect/disconnect events). | |
1834 | * | |
1835 | * The implementation is to stop net queues if modified vring has low | |
1836 | * descriptor availability. Wake if all vrings are not in low descriptor | |
1837 | * availability and modified vring has high descriptor availability. | |
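* The gap between the low and high watermarks provides hysteresis so | |
* the queues do not flap between stopped and awake on every packet. | |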
1838 | */ | |
1839 | static inline void __wil_update_net_queues(struct wil6210_priv *wil, | |
1840 | struct vring *vring, | |
1841 | bool check_stop) | |
1842 | { | |
1843 | int i; | |
1844 | ||
1845 | if (vring) | |
1846 | wil_dbg_txrx(wil, "vring %d, check_stop=%d, stopped=%d", | |
1847 | (int)(vring - wil->vring_tx), check_stop, | |
1848 | wil->net_queue_stopped); | |
1849 | else | |
1850 | wil_dbg_txrx(wil, "check_stop=%d, stopped=%d", | |
1851 | check_stop, wil->net_queue_stopped); | |
1852 | ||
1853 | if (check_stop == wil->net_queue_stopped) | |
1854 | /* net queues already in desired state */ | |
1855 | return; | |
1856 | ||
1857 | if (check_stop) { | |
1858 | if (!vring || unlikely(wil_vring_avail_low(vring))) { | |
1859 | /* not enough room in the vring */ | |
1860 | netif_tx_stop_all_queues(wil_to_ndev(wil)); | |
1861 | wil->net_queue_stopped = true; | |
1862 | wil_dbg_txrx(wil, "netif_tx_stop called\n"); | |
1863 | } | |
1864 | return; | |
1865 | } | |
1866 | ||
1867 | /* check wake */ | |
1868 | for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { | |
1869 | struct vring *cur_vring = &wil->vring_tx[i]; | |
1870 | struct vring_tx_data *txdata = &wil->vring_tx_data[i]; | |
1871 | ||
1872 | if (!cur_vring->va || !txdata->enabled || cur_vring == vring) | |
1873 | continue; | |
1874 | ||
1875 | if (wil_vring_avail_low(cur_vring)) { | |
1876 | wil_dbg_txrx(wil, "vring %d full, can't wake\n", | |
1877 | (int)(cur_vring - wil->vring_tx)); | |
1878 | return; | |
1879 | } | |
1880 | } | |
1881 | ||
1882 | if (!vring || wil_vring_avail_high(vring)) { | |
1883 | /* enough room in the vring */ | |
1884 | wil_dbg_txrx(wil, "calling netif_tx_wake\n"); | |
1885 | netif_tx_wake_all_queues(wil_to_ndev(wil)); | |
1886 | wil->net_queue_stopped = false; | |
1887 | } | |
1888 | } | |
1889 | ||
1890 | void wil_update_net_queues(struct wil6210_priv *wil, struct vring *vring, | |
1891 | bool check_stop) | |
1892 | { | |
1893 | spin_lock(&wil->net_queue_lock); | |
1894 | __wil_update_net_queues(wil, vring, check_stop); | |
1895 | spin_unlock(&wil->net_queue_lock); | |
1896 | } | |
1897 | ||
1898 | void wil_update_net_queues_bh(struct wil6210_priv *wil, struct vring *vring, | |
1899 | bool check_stop) | |
1900 | { | |
1901 | spin_lock_bh(&wil->net_queue_lock); | |
1902 | __wil_update_net_queues(wil, vring, check_stop); | |
1903 | spin_unlock_bh(&wil->net_queue_lock); | |
1904 | } | |
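/* the _bh variant disables bottom halves while net_queue_lock is | |
 * held, avoiding deadlock with the Tx completion path, which takes | |
 * the same lock from softirq context | |
 */ | |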
1905 | ||
2be7d22f VK |
1906 | netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) |
1907 | { | |
1908 | struct wil6210_priv *wil = ndev_to_wil(ndev); | |
3df2cd36 | 1909 | struct ethhdr *eth = (void *)skb->data; |
41d6b093 | 1910 | bool bcast = is_multicast_ether_addr(eth->h_dest); |
2be7d22f | 1911 | struct vring *vring; |
aa27deaa | 1912 | static bool pr_once_fw; |
2be7d22f VK |
1913 | int rc; |
1914 | ||
af3db60a | 1915 | wil_dbg_txrx(wil, "start_xmit\n"); |
33c477fd | 1916 | if (unlikely(!test_bit(wil_status_fwready, wil->status))) { |
aa27deaa VK |
1917 | if (!pr_once_fw) { |
1918 | wil_err(wil, "FW not ready\n"); | |
1919 | pr_once_fw = true; | |
1920 | } | |
2be7d22f VK |
1921 | goto drop; |
1922 | } | |
33c477fd | 1923 | if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) { |
d8ed043a | 1924 | wil_dbg_ratelimited(wil, "FW not connected, packet dropped\n"); |
2be7d22f VK |
1925 | goto drop; |
1926 | } | |
33c477fd | 1927 | if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) { |
2be7d22f VK |
1928 | wil_err(wil, "Xmit in monitor mode not supported\n"); |
1929 | goto drop; | |
1930 | } | |
aa27deaa | 1931 | pr_once_fw = false; |
d58db4e4 VK |
1932 | |
1933 | /* find vring */ | |
a895cb8b LD |
1934 | if (wil->wdev->iftype == NL80211_IFTYPE_STATION && !wil->pbss) { |
1935 | /* in STA mode (ESS), all to same VRING (to AP) */ | |
54ed90a8 | 1936 | vring = wil_find_tx_vring_sta(wil, skb); |
a895cb8b LD |
1937 | } else if (bcast) { |
1938 | if (wil->pbss) | |
1939 | /* in pbss, no bcast VRING - duplicate skb in | |
1940 | * all stations VRINGs | |
1941 | */ | |
1942 | vring = wil_find_tx_bcast_2(wil, skb); | |
1943 | else if (wil->wdev->iftype == NL80211_IFTYPE_AP) | |
1944 | /* AP has a dedicated bcast VRING */ | |
1945 | vring = wil_find_tx_bcast_1(wil, skb); | |
1946 | else | |
1947 | /* unexpected combination, fallback to duplicating | |
1948 | * the skb in all stations VRINGs | |
1949 | */ | |
1950 | vring = wil_find_tx_bcast_2(wil, skb); | |
1951 | } else { | |
1952 | /* unicast, find specific VRING by dest. address */ | |
1953 | vring = wil_find_tx_ucast(wil, skb); | |
54ed90a8 | 1954 | } |
33c477fd | 1955 | if (unlikely(!vring)) { |
5aed1393 | 1956 | wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest); |
d58db4e4 | 1957 | goto drop; |
2be7d22f | 1958 | } |
d58db4e4 VK |
1959 | /* set up vring entry */ |
1960 | rc = wil_tx_vring(wil, vring, skb); | |
1961 | ||
2be7d22f VK |
1962 | switch (rc) { |
1963 | case 0: | |
f9e3033f DL |
1964 | /* shall we stop net queues? */ |
1965 | wil_update_net_queues_bh(wil, vring, true); | |
795ce734 | 1966 | /* statistics will be updated on the tx_complete */ |
2be7d22f VK |
1967 | dev_kfree_skb_any(skb); |
1968 | return NETDEV_TX_OK; | |
1969 | case -ENOMEM: | |
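/* ring full: NETDEV_TX_BUSY asks the stack to requeue the skb | |
 * and retry later, so the packet is not counted as dropped | |
 */ | |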
1970 | return NETDEV_TX_BUSY; | |
1971 | default: | |
afda8bb5 | 1972 | break; /* goto drop; */ |
2be7d22f VK |
1973 | } |
1974 | drop: | |
2be7d22f VK |
1975 | ndev->stats.tx_dropped++; |
1976 | dev_kfree_skb_any(skb); | |
1977 | ||
1978 | return NET_XMIT_DROP; | |
1979 | } | |
1980 | ||
713c8a29 VK |
1981 | static inline bool wil_need_txstat(struct sk_buff *skb) |
1982 | { | |
1983 | struct ethhdr *eth = (void *)skb->data; | |
1984 | ||
1985 | return is_unicast_ether_addr(eth->h_dest) && skb->sk && | |
1986 | (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS); | |
1987 | } | |
1988 | ||
1989 | static inline void wil_consume_skb(struct sk_buff *skb, bool acked) | |
1990 | { | |
1991 | if (unlikely(wil_need_txstat(skb))) | |
1992 | skb_complete_wifi_ack(skb, acked); | |
1993 | else | |
1994 | acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb); | |
1995 | } | |
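/* dev_consume_skb_any() signals a successfully transmitted skb so | |
 * drop monitors do not report it, while dev_kfree_skb_any() is | |
 * accounted as a drop | |
 */ | |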
1996 | ||
2be7d22f VK |
1997 | /** |
1998 | * Clean up transmitted skbs from the Tx VRING | |
1999 | * | |
e0287c4a VK |
2000 | * Return number of descriptors cleared |
2001 | * | |
2be7d22f VK |
2002 | * Safe to call from IRQ |
2003 | */ | |
e0287c4a | 2004 | int wil_tx_complete(struct wil6210_priv *wil, int ringid) |
2be7d22f | 2005 | { |
795ce734 | 2006 | struct net_device *ndev = wil_to_ndev(wil); |
2be7d22f VK |
2007 | struct device *dev = wil_to_dev(wil); |
2008 | struct vring *vring = &wil->vring_tx[ringid]; | |
097638a0 | 2009 | struct vring_tx_data *txdata = &wil->vring_tx_data[ringid]; |
e0287c4a | 2010 | int done = 0; |
c8b78b5f | 2011 | int cid = wil->vring2cid_tid[ringid][0]; |
41d6b093 | 2012 | struct wil_net_stats *stats = NULL; |
c236658f | 2013 | volatile struct vring_tx_desc *_d; |
0436fd9a VS |
2014 | int used_before_complete; |
2015 | int used_new; | |
2be7d22f | 2016 | |
33c477fd | 2017 | if (unlikely(!vring->va)) { |
2be7d22f | 2018 | wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid); |
e0287c4a | 2019 | return 0; |
2be7d22f VK |
2020 | } |
2021 | ||
33c477fd | 2022 | if (unlikely(!txdata->enabled)) { |
097638a0 VK |
2023 | wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid); |
2024 | return 0; | |
2025 | } | |
2026 | ||
af3db60a | 2027 | wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid); |
2be7d22f | 2028 | |
0436fd9a VS |
2029 | used_before_complete = wil_vring_used_tx(vring); |
2030 | ||
41d6b093 VK |
2031 | if (cid < WIL6210_MAX_CID) |
2032 | stats = &wil->sta[cid].stats; | |
2033 | ||
2be7d22f | 2034 | while (!wil_vring_is_empty(vring)) { |
c236658f | 2035 | int new_swtail; |
f88f113a | 2036 | struct wil_ctx *ctx = &vring->ctx[vring->swtail]; |
c236658f VK |
2037 | /* | |
2038 | * For a fragmented skb, HW will set the DU bit only for the | |
3d4bde15 VK |
2039 | * last fragment - look for it. | |
2040 | * In TSO, the first DU also covers the hdr desc. | |
c236658f VK |
2041 | */ |
2042 | int lf = (vring->swtail + ctx->nr_frags) % vring->size; | |
2043 | /* TODO: check we are not past head */ | |
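/* e.g. an skb with 3 fragments occupies 4 descriptors and its first | |
 * ctx has nr_frags == 3, so lf lands on the 4th (last) descriptor | |
 */ | |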
4de41bef | 2044 | |
c236658f | 2045 | _d = &vring->va[lf].tx; |
33c477fd | 2046 | if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU))) |
2be7d22f VK |
2047 | break; |
2048 | ||
c236658f VK |
2049 | new_swtail = (lf + 1) % vring->size; |
2050 | while (vring->swtail != new_swtail) { | |
2051 | struct vring_tx_desc dd, *d = &dd; | |
c236658f | 2052 | u16 dmalen; |
76dfa4b7 VK |
2053 | struct sk_buff *skb; |
2054 | ||
2055 | ctx = &vring->ctx[vring->swtail]; | |
2056 | skb = ctx->skb; | |
c236658f | 2057 | _d = &vring->va[vring->swtail].tx; |
2be7d22f | 2058 | |
c236658f | 2059 | *d = *_d; |
f88f113a | 2060 | |
c236658f VK |
2061 | dmalen = le16_to_cpu(d->dma.length); |
2062 | trace_wil6210_tx_done(ringid, vring->swtail, dmalen, | |
2063 | d->dma.error); | |
2064 | wil_dbg_txrx(wil, | |
5b29c573 VK |
2065 | "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n", |
2066 | ringid, vring->swtail, dmalen, | |
2067 | d->dma.status, d->dma.error); | |
2068 | wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4, | |
c236658f | 2069 | (const void *)d, sizeof(*d), false); |
795ce734 | 2070 | |
2232abd5 | 2071 | wil_txdesc_unmap(dev, d, ctx); |
c236658f VK |
2072 | |
2073 | if (skb) { | |
33c477fd | 2074 | if (likely(d->dma.error == 0)) { |
c236658f | 2075 | ndev->stats.tx_packets++; |
c236658f | 2076 | ndev->stats.tx_bytes += skb->len; |
41d6b093 VK |
2077 | if (stats) { |
2078 | stats->tx_packets++; | |
2079 | stats->tx_bytes += skb->len; | |
2080 | } | |
c236658f VK |
2081 | } else { |
2082 | ndev->stats.tx_errors++; | |
41d6b093 VK |
2083 | if (stats) |
2084 | stats->tx_errors++; | |
c236658f | 2085 | } |
713c8a29 | 2086 | wil_consume_skb(skb, d->dma.error == 0); |
c236658f VK |
2087 | } |
2088 | memset(ctx, 0, sizeof(*ctx)); | |
eb26cff1 ME |
2089 | /* Make sure the ctx is zeroed before updating the tail |
2090 | * to prevent a case where wil_tx_vring will see | |
2091 | * this descriptor as used and handle it before ctx zero | |
2092 | * is completed. | |
2093 | */ | |
2094 | wmb(); | |
c236658f VK |
2095 | /* There is no need to touch HW descriptor: |
2096 | * - status bit TX_DMA_STATUS_DU is set by design, | |
2097 | * so hardware will not try to process this desc., | |
2098 | * - rest of descriptor will be initialized on Tx. | |
2099 | */ | |
2100 | vring->swtail = wil_vring_next_tail(vring); | |
2101 | done++; | |
2be7d22f | 2102 | } |
2be7d22f | 2103 | } |
67c3e1b4 | 2104 | |
0436fd9a VS |
2105 | /* performance monitoring */ |
2106 | used_new = wil_vring_used_tx(vring); | |
2107 | if (wil_val_in_range(vring_idle_trsh, | |
2108 | used_new, used_before_complete)) { | |
2109 | wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n", | |
2110 | ringid, used_before_complete, used_new); | |
7c0acf86 VK |
2111 | txdata->last_idle = get_cycles(); |
2112 | } | |
67c3e1b4 | 2113 | |
f9e3033f DL |
2114 | /* shall we wake net queues? */ |
2115 | if (done) | |
2116 | wil_update_net_queues(wil, vring, false); | |
e0287c4a VK |
2117 | |
2118 | return done; | |
2be7d22f | 2119 | } |