/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/xfrm.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/busy_poll.h>
#ifdef CONFIG_CHELSIO_T4_FCOE
#include <scsi/fc/fc_fcoe.h>
#endif /* CONFIG_CHELSIO_T4_FCOE */
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_ptp.h"
#include "cxgb4_uld.h"

/*
 * Rx buffer size.  We use largish buffers if possible but settle for single
 * pages under memory shortage.
 */
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif

/* RX_PULL_LEN should be <= RX_COPY_THRES */
#define RX_COPY_THRES    256
#define RX_PULL_LEN      128

/*
 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
 */
#define RX_PKT_SKB_LEN   512

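/*
 * Worked example (assuming the common 4KB page size, i.e. PAGE_SHIFT = 12):
 * FL_PG_ORDER = 16 - 12 = 4, so "large" Free List buffers are
 * PAGE_SIZE << 4 = 64KB; on systems whose pages are already 64KB or bigger,
 * a single order-0 page suffices.
 */
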
/*
 * Max number of Tx descriptors we clean up at a time.  Should be modest as
 * freeing skbs isn't cheap and it happens while holding locks.  We just need
 * to free packets faster than they arrive, we eventually catch up and keep
 * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
 */
#define MAX_TX_RECLAIM 16

/*
 * Max number of Rx buffers we replenish at a time.  Again keep this modest,
 * allocating buffers isn't cheap either.
 */
#define MAX_RX_REFILL 16U

/*
 * Period of the Rx queue check timer.  This timer is infrequent as it has
 * something to do only when the system experiences severe memory shortage.
 */
#define RX_QCHECK_PERIOD (HZ / 2)

/*
 * Period of the Tx queue check timer.
 */
#define TX_QCHECK_PERIOD (HZ / 2)

/*
 * Max number of Tx descriptors to be reclaimed by the Tx timer.
 */
#define MAX_TIMER_TX_RECLAIM 100

/*
 * Timer index used when backing off due to memory shortage.
 */
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * Suspension threshold for non-Ethernet Tx queues.  We require enough room
 * for a full sized WR.
 */
#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256

/*
 * Max size of a WR sent through a control Tx queue.
 */
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN

struct rx_sw_desc {    /* SW state per Rx descriptor */
	struct page *page;
	dma_addr_t dma_addr;
};

/*
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
 * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
 * We could easily support more but there doesn't seem to be much need for
 * that ...
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000

static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
					  unsigned int mtu)
{
	struct sge *s = &adapter->sge;

	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
}
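/*
 * For illustration (pktshift and fl_align are per-adapter parameters; the
 * values here are just an assumed example): with pktshift = 2 and
 * fl_align = 64, a 1500-byte MTU needs 2 + 14 (ETH_HLEN) + 4 (VLAN_HLEN) +
 * 1500 = 1520 bytes, which ALIGN() rounds up to a 1536-byte buffer.
 */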

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)

/*
 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array.  We also use bit 4, when the buffer has been unmapped
 * for DMA, but this is of course never sent to the hardware and is only used
 * to prevent double unmappings.  All of the above requires that the Free List
 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte aligned or a power of 2 greater in alignment.  Since the SGE's
 * minimal Free List Buffer alignment is 32 bytes, this works out for us ...
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File.  We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* buffer large (FL_PG_ORDER) page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};

static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
#define MIN_NAPI_WORK  1

static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
}

static inline bool is_buf_mapped(const struct rx_sw_desc *d)
{
	return !(d->dma_addr & RX_UNMAPPED_BUF);
}
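/*
 * Bit-layout sketch (illustrative, derived from the enum above): a mapped
 * large-page buffer at bus address 0x1000 is stored as
 * dma_addr = 0x1000 | RX_LARGE_PG_BUF = 0x1001.  get_buf_addr() masks off
 * the low five bits to recover 0x1000, and is_buf_mapped() is true because
 * bit 4 (RX_UNMAPPED_BUF) is clear.
 */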

/**
 * txq_avail - return the number of available slots in a Tx queue
 * @q: the Tx queue
 *
 * Returns the number of descriptors in a Tx queue available to write new
 * packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

/**
 * fl_cap - return the capacity of a free-buffer list
 * @fl: the FL
 *
 * Returns the capacity of a free-buffer list.  The capacity is less than
 * the size because one descriptor needs to be left unpopulated, otherwise
 * HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}

/**
 * fl_starving - return whether a Free List is starving.
 * @adapter: pointer to the adapter
 * @fl: the Free List
 *
 * Tests specified Free List to see whether the number of buffers
 * available to the hardware has fallen below our "starvation"
 * threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}

int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
		  dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL(cxgb4_map_skb);

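/*
 * Note for callers (see cxgb4_eth_xmit below): @addr must have room for the
 * linear part plus one entry per page fragment, which is why callers declare
 * "dma_addr_t addr[MAX_SKB_FRAGS + 1]".  A negative return means nothing is
 * left mapped, so the skb can simply be dropped and counted as a mapping
 * error.
 */
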
#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
		      const dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++)
		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}

/**
 * deferred_unmap_destructor - unmap a packet when it is freed
 * @skb: the packet
 *
 * This is the packet destructor used for Tx packets that need to remain
 * mapped until they are freed rather than until their Tx descriptors are
 * freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
#endif

static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
{
	const struct ulptx_sge_pair *p;
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	if (likely(skb_headlen(skb)))
		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
				 DMA_TO_DEVICE);
	else {
		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
			       DMA_TO_DEVICE);
		nfrags--;
	}

	/*
	 * the complexity below is because of the possibility of a wrap-around
	 * in the middle of an SGL
	 */
	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p++;
		} else if ((u8 *)p == (u8 *)q->stat) {
			p = (const struct ulptx_sge_pair *)q->desc;
			goto unmap;
		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[2];
		} else {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[1];
		}
	}
	if (nfrags) {
		__be64 addr;

		if ((u8 *)p == (u8 *)q->stat)
			p = (const struct ulptx_sge_pair *)q->desc;
		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
						       *(const __be64 *)q->desc;
		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
			       DMA_TO_DEVICE);
	}
}

/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @adap: the adapter
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 * @unmap: whether the buffers should be unmapped for DMA
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers.  Called with the Tx queue lock held.
 */
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
		  unsigned int n, bool unmap)
{
	struct tx_sw_desc *d;
	unsigned int cidx = q->cidx;
	struct device *dev = adap->pdev_dev;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {                       /* an SGL is present */
			if (unmap)
				unmap_sgl(dev, d->skb, d->sgl, q);
			dev_consume_skb_any(d->skb);
			d->skb = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));

	hw_cidx -= q->cidx;
	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}
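/*
 * Worked example (numbers are illustrative): with q->size = 1024, a hardware
 * cidx of 4 and a software q->cidx of 1020, the difference is -1016, so the
 * wrap-around correction yields -1016 + 1024 = 8 reclaimable descriptors.
 */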

/**
 * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
 * @adap: the adapter
 * @q: the Tx queue to reclaim completed descriptors from
 * @unmap: whether the buffers should be unmapped for DMA
 *
 * Reclaims Tx descriptors that the SGE has indicated it has processed,
 * and frees the associated buffers if possible.  Called with the Tx
 * queue locked.
 */
inline void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
				       bool unmap)
{
	int avail = reclaimable(q);

	if (avail) {
		/*
		 * Limit the amount of clean up work we do at a time to keep
		 * the Tx lock hold time O(1).
		 */
		if (avail > MAX_TX_RECLAIM)
			avail = MAX_TX_RECLAIM;

		free_tx_desc(adap, q, avail, unmap);
		q->in_use -= avail;
	}
}
EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);

static inline int get_buf_size(struct adapter *adapter,
			       const struct rx_sw_desc *d)
{
	struct sge *s = &adapter->sge;
	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
	int buf_size;

	switch (rx_buf_size_idx) {
	case RX_SMALL_PG_BUF:
		buf_size = PAGE_SIZE;
		break;

	case RX_LARGE_PG_BUF:
		buf_size = PAGE_SIZE << s->fl_pg_order;
		break;

	case RX_SMALL_MTU_BUF:
		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
		break;

	case RX_LARGE_MTU_BUF:
		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
		break;

	default:
		BUG_ON(1);
	}

	return buf_size;
}

/**
 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @adap: the adapter
 * @q: the SGE free list to free buffers from
 * @n: how many buffers to free
 *
 * Release the next @n buffers on an SGE free-buffer Rx queue.  The
 * buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
{
	while (n--) {
		struct rx_sw_desc *d = &q->sdesc[q->cidx];

		if (is_buf_mapped(d))
			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
				       get_buf_size(adap, d),
				       PCI_DMA_FROMDEVICE);
		put_page(d->page);
		d->page = NULL;
		if (++q->cidx == q->size)
			q->cidx = 0;
		q->avail--;
	}
}

/**
 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 * @adap: the adapter
 * @q: the SGE free list
 *
 * Unmap the current buffer on an SGE free-buffer Rx queue.  The
 * buffer must be made inaccessible to HW before calling this function.
 *
 * This is similar to @free_rx_bufs above but does not free the buffer.
 * Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
{
	struct rx_sw_desc *d = &q->sdesc[q->cidx];

	if (is_buf_mapped(d))
		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
			       get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
	d->page = NULL;
	if (++q->cidx == q->size)
		q->cidx = 0;
	q->avail--;
}

static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= 8) {
		u32 val = adap->params.arch.sge_fl_db;

		if (is_t4(adap->params.chip))
			val |= PIDX_V(q->pend_cred / 8);
		else
			val |= PIDX_T5_V(q->pend_cred / 8);

		/* Make sure all memory writes to the Free List queue are
		 * committed before we tell the hardware about them.
		 */
		wmb();

		/* If we don't have access to the new User Doorbell (T5+), use
		 * the old doorbell mechanism; otherwise use the new BAR2
		 * mechanism.
		 */
		if (unlikely(q->bar2_addr == NULL)) {
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     val | QID_V(q->cntxt_id));
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);

			/* This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
		q->pend_cred &= 7;
	}
}
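/*
 * Credit arithmetic example (illustrative): with q->pend_cred = 23 the
 * doorbell advertises 23 / 8 = 2 credits (16 buffers) to the hardware, and
 * "q->pend_cred &= 7" leaves the remaining 7 buffers pending until at least
 * one more full group of 8 accumulates.
 */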

static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
				  dma_addr_t mapping)
{
	sd->page = pg;
	sd->dma_addr = mapping;      /* includes size low bits */
}

/**
 * refill_fl - refill an SGE Rx buffer ring
 * @adap: the adapter
 * @q: the ring to refill
 * @n: the number of new buffers to allocate
 * @gfp: the gfp flags for the allocations
 *
 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 * allocated with the supplied gfp flags.  The caller must assure that
 * @n does not exceed the queue's capacity.  If afterwards the queue is
 * found critically low mark it as starving in the bitmap of starving FLs.
 *
 * Returns the number of buffers allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
			      gfp_t gfp)
{
	struct sge *s = &adap->sge;
	struct page *pg;
	dma_addr_t mapping;
	unsigned int cred = q->avail;
	__be64 *d = &q->desc[q->pidx];
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	int node;

#ifdef CONFIG_DEBUG_FS
	if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
		goto out;
#endif

	gfp |= __GFP_NOWARN;
	node = dev_to_node(adap->pdev_dev);

	if (s->fl_pg_order == 0)
		goto alloc_small_pages;

	/*
	 * Prefer large buffers
	 */
	while (n) {
		pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
		if (unlikely(!pg)) {
			q->large_alloc_failed++;
			break;       /* fall back to single pages */
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0,
				       PAGE_SIZE << s->fl_pg_order,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			__free_pages(pg, s->fl_pg_order);
			q->mapping_err++;
			goto out;   /* do not try small pages for this error */
		}
		mapping |= RX_LARGE_PG_BUF;
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
		n--;
	}

alloc_small_pages:
	while (n--) {
		pg = alloc_pages_node(node, gfp, 0);
		if (unlikely(!pg)) {
			q->alloc_failed++;
			break;
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			put_page(pg);
			q->mapping_err++;
			goto out;
		}
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
	}

out:	cred = q->avail - cred;
	q->pend_cred += cred;
	ring_fl_db(adap, q);

	if (unlikely(fl_starving(adap, q))) {
		smp_wmb();
		q->low++;
		set_bit(q->cntxt_id - adap->sge.egr_start,
			adap->sge.starving_fl);
	}

	return cred;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
		  GFP_ATOMIC);
}

/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @dev: the PCI device's core device
 * @nelem: the number of descriptors
 * @elem_size: the size of each descriptor
 * @sw_size: the size of the SW state associated with each ring element
 * @phys: the physical address of the allocated ring
 * @metadata: address of the array holding the SW state for the ring
 * @stat_size: extra space in HW ring for status information
 * @node: preferred node for memory allocations
 *
 * Allocates resources for an SGE descriptor ring, such as Tx queues,
 * free buffer lists, or response queues.  Each SGE ring requires
 * space for its HW descriptors plus, optionally, space for the SW state
 * associated with each HW entry (the metadata).  The function returns
 * three values: the virtual address for the HW ring (the return value
 * of the function), the bus address of the HW ring, and the address
 * of the SW ring.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size, int node)
{
	size_t len = nelem * elem_size + stat_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kcalloc_node(sw_size, nelem, GFP_KERNEL, node);

		if (!s) {
			dma_free_coherent(dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	return p;
}

/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
	 * repeated sequences of { Length[i], Length[i+1], Address[i],
	 * Address[i+1] } (this ensures that all addresses are on 64-bit
	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
	 * Address[N+1] is omitted.
	 *
	 * The following calculation incorporates all of the above.  It's
	 * somewhat hard to follow but, briefly: the "+2" accounts for the
	 * first two flits which include the DSGL header, Length0 and
	 * Address0; the "(3*(n-1))/2" covers the main body of list entries
	 * (3 flits for every pair of the remaining N-1 entries); and finally
	 * the "+((n-1)&1)" adds the one extra flit needed if (n-1) is odd ...
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
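/*
 * Worked example: for n = 3 SGL entries, after n-- we have n = 2, so
 * sgl_len() returns (3 * 2) / 2 + (2 & 1) + 2 = 5 flits: two flits for the
 * DSGL header plus Length0/Address0, and three flits for the remaining pair
 * { Length1, Length2, Address1, Address2 }.
 */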

/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Returns the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}
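/*
 * Each Tx descriptor is 64 bytes, i.e. eight 8-byte flits, so the 5-flit
 * SGL from the example above still fits in a single descriptor, while
 * 9..16 flits would need two.
 */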

/**
 * is_eth_imm - can an Ethernet packet be sent as immediate data?
 * @skb: the packet
 * @chip_ver: chip version
 *
 * Returns whether an Ethernet packet is small enough to fit as
 * immediate data.  Return value corresponds to headroom required.
 */
static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
{
	int hdrlen = 0;

	if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
	    chip_ver > CHELSIO_T5) {
		hdrlen = sizeof(struct cpl_tx_tnl_lso);
		hdrlen += sizeof(struct cpl_tx_pkt_core);
	} else {
		hdrlen = skb_shinfo(skb)->gso_size ?
			 sizeof(struct cpl_tx_pkt_lso_core) : 0;
		hdrlen += sizeof(struct cpl_tx_pkt);
	}
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}
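/*
 * Illustration (the exact CPL header sizes come from t4_msg.h and are only
 * assumed qualitatively here): a 180-byte non-GSO packet needs just
 * hdrlen = sizeof(struct cpl_tx_pkt); since 180 + hdrlen fits within
 * MAX_IMM_TX_PKT_LEN (256), is_eth_imm() returns the headroom and the
 * packet is copied inline into the Work Request, avoiding DMA mapping.
 */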

/**
 * calc_tx_flits - calculate the number of flits for a packet Tx WR
 * @skb: the packet
 * @chip_ver: chip version
 *
 * Returns the number of flits needed for a Tx WR for the given Ethernet
 * packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
					 unsigned int chip_ver)
{
	unsigned int flits;
	int hdrlen = is_eth_imm(skb, chip_ver);

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */

	if (hdrlen)
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size) {
		if (skb->encapsulation && chip_ver > CHELSIO_T5)
			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
				 sizeof(struct cpl_tx_tnl_lso);
		else
			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
				 sizeof(struct cpl_tx_pkt_lso_core);

		hdrlen += sizeof(struct cpl_tx_pkt_core);
		flits += (hdrlen / sizeof(__be64));
	} else {
		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	}
	return flits;
}

/**
 * calc_tx_descs - calculate the number of Tx descriptors for a packet
 * @skb: the packet
 * @chip_ver: chip version
 *
 * Returns the number of Tx descriptors needed for the given Ethernet
 * packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
					 unsigned int chip_ver)
{
	return flits_to_desc(calc_tx_flits(skb, chip_ver));
}

/**
 * cxgb4_write_sgl - populate a scatter/gather list for a packet
 * @skb: the packet
 * @q: the Tx queue we are writing into
 * @sgl: starting location for writing the SGL
 * @end: points right after the end of the SGL
 * @start: start offset into skb main-body data to include in the SGL
 * @addr: the list of bus addresses for the SGL elements
 *
 * Generates a gather list for the buffers that make up a packet.
 * The caller must provide adequate space for the SGL that will be written.
 * The SGL includes all of the packet's page fragments and the data in its
 * main body except for the first @start bytes.  @sgl must be 16-byte
 * aligned and within a Tx descriptor with available space.  @end points
 * right after the end of the SGL but does not account for any potential
 * wrap around, i.e., @end > @sgl.
 */
void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
		     struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		     const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int nfrags = si->nr_frags;
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

	len = skb_headlen(skb) - start;
	if (likely(len)) {
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		nfrags++;
	} else {
		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
		sgl->addr0 = cpu_to_be64(addr[1]);
	}

	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
			      ULPTX_NSGE_V(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
		to->addr[0] = cpu_to_be64(addr[i]);
		to->addr[1] = cpu_to_be64(addr[++i]);
	}
	if (nfrags) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(0);
		to->addr[0] = cpu_to_be64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)q->stat)) {
		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)q->stat;
		memcpy(q->desc, (u8 *)buf + part0, part1);
		end = (void *)q->desc + part1;
	}
	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
		*end = 0;
}
EXPORT_SYMBOL(cxgb4_write_sgl);

/* This function copies 64 byte coalesced work request to
 * memory mapped BAR2 space. For coalesced WR SGE fetches
 * data from the FIFO instead of from Host.
 */
static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
{
	int count = 8;

	while (count) {
		writeq(*src, dst);
		src++;
		dst++;
		count--;
	}
}

/**
 * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
 * @adap: the adapter
 * @q: the Tx queue
 * @n: number of new descriptors to give to HW
 *
 * Ring the doorbell for a Tx queue.
 */
inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
	/* Make sure that all writes to the TX Descriptors are committed
	 * before we tell the hardware about them.
	 */
	wmb();

	/* If we don't have access to the new User Doorbell (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(q->bar2_addr == NULL)) {
		u32 val = PIDX_V(n);
		unsigned long flags;

		/* For T4 we need to participate in the Doorbell Recovery
		 * mechanism.
		 */
		spin_lock_irqsave(&q->db_lock, flags);
		if (!q->db_disabled)
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     QID_V(q->cntxt_id) | val);
		else
			q->db_pidx_inc += n;
		q->db_pidx = q->pidx;
		spin_unlock_irqrestore(&q->db_lock, flags);
	} else {
		u32 val = PIDX_T5_V(n);

		/* T4 and later chips share the same PIDX field offset within
		 * the doorbell, but T5 and later shrank the field in order to
		 * gain a bit for Doorbell Priority.  The field was absurdly
		 * large in the first place (14 bits) so we just use the T5
		 * and later limits and warn if a Queue ID is too large.
		 */
		WARN_ON(val & DBPRIO_F);

		/* If we're only writing a single TX Descriptor and we can use
		 * Inferred QID registers, we can use the Write Combining
		 * Gather Buffer; otherwise we use the simple doorbell.
		 */
		if (n == 1 && q->bar2_qid == 0) {
			int index = (q->pidx
				     ? (q->pidx - 1)
				     : (q->size - 1));
			u64 *wr = (u64 *)&q->desc[index];

			cxgb_pio_copy((u64 __iomem *)
				      (q->bar2_addr + SGE_UDB_WCDOORBELL),
				      wr);
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);
		}

		/* This Write Memory Barrier will force the write to the User
		 * Doorbell area to be flushed.  This is needed to prevent
		 * writes on different CPUs for the same queue from hitting
		 * the adapter out of order.  This is required when some Work
		 * Requests take the Write Combine Gather Buffer path (user
		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
		 * take the traditional path where we simply increment the
		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
		 * hardware DMA read the actual Work Request.
		 */
		wmb();
	}
}
EXPORT_SYMBOL(cxgb4_ring_tx_db);

/**
 * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
 * @skb: the packet
 * @q: the Tx queue where the packet will be inlined
 * @pos: starting position in the Tx queue where to inline the packet
 *
 * Inline a packet's contents directly into Tx descriptors, starting at
 * the given position within the Tx DMA ring.
 * Most of the complexity of this operation is dealing with wrap arounds
 * in the middle of the packet we want to inline.
 */
void cxgb4_inline_tx_skb(const struct sk_buff *skb,
			 const struct sge_txq *q, void *pos)
{
	int left = (void *)q->stat - pos;
	u64 *p;

	if (likely(skb->len <= left)) {
		if (likely(!skb->data_len))
			skb_copy_from_linear_data(skb, pos, skb->len);
		else
			skb_copy_bits(skb, 0, pos, skb->len);
		pos += skb->len;
	} else {
		skb_copy_bits(skb, 0, pos, left);
		skb_copy_bits(skb, left, q->desc, skb->len - left);
		pos = (void *)q->desc + (skb->len - left);
	}

	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8)
		*p = 0;
}
EXPORT_SYMBOL(cxgb4_inline_tx_skb);

static void *inline_tx_skb_header(const struct sk_buff *skb,
				  const struct sge_txq *q, void *pos,
				  int length)
{
	u64 *p;
	int left = (void *)q->stat - pos;

	if (likely(length <= left)) {
		memcpy(pos, skb->data, length);
		pos += length;
	} else {
		memcpy(pos, skb->data, left);
		memcpy(q->desc, skb->data + left, length - left);
		pos = (void *)q->desc + (length - left);
	}
	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8) {
		*p = 0;
		return p + 1;
	}
	return p;
}

/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
	int csum_type;
	bool inner_hdr_csum = false;
	u16 proto, ver;

	if (skb->encapsulation &&
	    (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5))
		inner_hdr_csum = true;

	if (inner_hdr_csum) {
		ver = inner_ip_hdr(skb)->version;
		proto = (ver == 4) ? inner_ip_hdr(skb)->protocol :
			inner_ipv6_hdr(skb)->nexthdr;
	} else {
		ver = ip_hdr(skb)->version;
		proto = (ver == 4) ? ip_hdr(skb)->protocol :
			ipv6_hdr(skb)->nexthdr;
	}

	if (ver == 4) {
		if (proto == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP;
		else if (proto == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP;
		else {
nocsum:			/*
			 * unknown protocol, disable HW csum
			 * and hope a bad packet is detected
			 */
			return TXPKT_L4CSUM_DIS_F;
		}
	} else {
		/*
		 * this doesn't work with extension headers
		 */
		if (proto == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP6;
		else if (proto == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP6;
		else
			goto nocsum;
	}

	if (likely(csum_type >= TX_CSUM_TCPIP)) {
		int eth_hdr_len, l4_len;
		u64 hdr_len;

		if (inner_hdr_csum) {
			/* This allows checksum offload for all encapsulated
			 * packets like GRE etc..
			 */
			l4_len = skb_inner_network_header_len(skb);
			eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN;
		} else {
			l4_len = skb_network_header_len(skb);
			eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
		}
		hdr_len = TXPKT_IPHDR_LEN_V(l4_len);

		if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
			hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		else
			hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
	} else {
		int start = skb_transport_offset(skb);

		return TXPKT_CSUM_TYPE_V(csum_type) |
			TXPKT_CSUM_START_V(start) |
			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
	}
}
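/*
 * Example (values assumed for illustration): for an untagged TCP/IPv4
 * packet with a 20-byte IP header on a T5, ver = 4 and proto = IPPROTO_TCP
 * select TX_CSUM_TCPIP; l4_len = 20 and eth_hdr_len = 0, so the function
 * returns TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP) | TXPKT_IPHDR_LEN_V(20).
 */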

static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}

#ifdef CONFIG_CHELSIO_T4_FCOE
static inline int
cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
		  const struct port_info *pi, u64 *cntrl)
{
	const struct cxgb_fcoe *fcoe = &pi->fcoe;

	if (!(fcoe->flags & CXGB_FCOE_ENABLED))
		return 0;

	if (skb->protocol != htons(ETH_P_FCOE))
		return 0;

	skb_reset_mac_header(skb);
	skb->mac_len = sizeof(struct ethhdr);

	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));

	if (!cxgb_fcoe_sof_eof_supported(adap, skb))
		return -ENOTSUPP;

	/* FC CRC offload */
	*cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
		 TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
		 TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
		 TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
		 TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
	return 0;
}
#endif /* CONFIG_CHELSIO_T4_FCOE */

/* Returns tunnel type if hardware supports offloading of the same.
 * It is called only for T5 and onwards.
 */
enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
{
	u8 l4_hdr = 0;
	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
	struct port_info *pi = netdev_priv(skb->dev);
	struct adapter *adapter = pi->adapter;

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB))
		return tnl_type;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return tnl_type;
	}

	switch (l4_hdr) {
	case IPPROTO_UDP:
		if (adapter->vxlan_port == udp_hdr(skb)->dest)
			tnl_type = TX_TNL_TYPE_VXLAN;
		else if (adapter->geneve_port == udp_hdr(skb)->dest)
			tnl_type = TX_TNL_TYPE_GENEVE;
		break;
	default:
		return tnl_type;
	}

	return tnl_type;
}

static inline void t6_fill_tnl_lso(struct sk_buff *skb,
				   struct cpl_tx_tnl_lso *tnl_lso,
				   enum cpl_tx_tnl_lso_type tnl_type)
{
	u32 val;
	int in_eth_xtra_len;
	int l3hdr_len = skb_network_header_len(skb);
	int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
	const struct skb_shared_info *ssi = skb_shinfo(skb);
	bool v6 = (ip_hdr(skb)->version == 6);

	val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
	      CPL_TX_TNL_LSO_FIRST_F |
	      CPL_TX_TNL_LSO_LAST_F |
	      (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
	      CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
	      CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
	      (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
	      CPL_TX_TNL_LSO_IPLENSETOUT_F |
	      (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
	tnl_lso->op_to_IpIdSplitOut = htonl(val);

	tnl_lso->IpIdOffsetOut = 0;

	/* Get the tunnel header length */
	val = skb_inner_mac_header(skb) - skb_mac_header(skb);
	in_eth_xtra_len = skb_inner_network_header(skb) -
			  skb_inner_mac_header(skb) - ETH_HLEN;

	switch (tnl_type) {
	case TX_TNL_TYPE_VXLAN:
	case TX_TNL_TYPE_GENEVE:
		tnl_lso->UdpLenSetOut_to_TnlHdrLen =
			htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
			      CPL_TX_TNL_LSO_UDPLENSETOUT_F);
		break;
	default:
		tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
		break;
	}

	tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
		htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
		      CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));

	tnl_lso->r1 = 0;

	val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
	      CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
	      CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
	      CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
	tnl_lso->Flow_to_TcpHdrLen = htonl(val);

	tnl_lso->IpIdOffset = htons(0);

	tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
	tnl_lso->TCPSeqOffset = htonl(0);
	tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
}

/**
 * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
 * @skb: the packet
 * @dev: the egress net device
 *
 * Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
 */
static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u32 wr_mid, ctrl0, op;
	u64 cntrl, *end, *sgl;
	int qidx, credits;
	unsigned int flits, ndesc;
	struct adapter *adap;
	struct sge_eth_txq *q;
	const struct port_info *pi;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	const struct skb_shared_info *ssi;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
	bool immediate = false;
	int len, max_pkt_len;
	bool ptp_enabled = is_ptp_enabled(skb, dev);
	unsigned int chip_ver;
	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;

#ifdef CONFIG_CHELSIO_T4_FCOE
	int err;
#endif /* CONFIG_CHELSIO_T4_FCOE */

	/*
	 * The chip min packet length is 10 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(skb->len < ETH_HLEN)) {
out_free:	dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Discard the packet if the length is greater than mtu */
	max_pkt_len = ETH_HLEN + dev->mtu;
	if (skb_vlan_tagged(skb))
		max_pkt_len += VLAN_HLEN;
	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
		goto out_free;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	ssi = skb_shinfo(skb);
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	if (xfrm_offload(skb) && !ssi->gso_size)
		return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
#endif /* CHELSIO_IPSEC_INLINE */

	qidx = skb_get_queue_mapping(skb);
	if (ptp_enabled) {
		spin_lock(&adap->ptp_lock);
		if (!(adap->ptp_tx_skb)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			adap->ptp_tx_skb = skb_get(skb);
		} else {
			spin_unlock(&adap->ptp_lock);
			goto out_free;
		}
		q = &adap->sge.ptptxq;
	} else {
		q = &adap->sge.ethtxq[qidx + pi->first_qset];
	}
	skb_tx_timestamp(skb);

	cxgb4_reclaim_completed_tx(adap, &q->q, true);
	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;

#ifdef CONFIG_CHELSIO_T4_FCOE
	err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
	if (unlikely(err == -ENOTSUPP)) {
		if (ptp_enabled)
			spin_unlock(&adap->ptp_lock);
		goto out_free;
	}
#endif /* CONFIG_CHELSIO_T4_FCOE */

	chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
	flits = calc_tx_flits(skb, chip_ver);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, qidx);
		if (ptp_enabled)
			spin_unlock(&adap->ptp_lock);
		return NETDEV_TX_BUSY;
	}

d0a1299c | 1387 | if (is_eth_imm(skb, chip_ver)) |
0034b298 KS |
1388 | immediate = true; |
1389 | ||
d0a1299c GG |
1390 | if (skb->encapsulation && chip_ver > CHELSIO_T5) |
1391 | tnl_type = cxgb_encap_offload_supported(skb); | |
1392 | ||
0034b298 | 1393 | if (!immediate && |
a6ec572b | 1394 | unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) { |
fd3a4790 | 1395 | q->mapping_err++; |
a4569504 AG |
1396 | if (ptp_enabled) |
1397 | spin_unlock(&adap->ptp_lock); | |
fd3a4790 DM |
1398 | goto out_free; |
1399 | } | |
1400 | ||
e2ac9628 | 1401 | wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); |
fd3a4790 DM |
1402 | if (unlikely(credits < ETHTXQ_STOP_THRES)) { |
1403 | eth_txq_stop(q); | |
e2ac9628 | 1404 | wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; |
fd3a4790 DM |
1405 | } |
1406 | ||
1407 | wr = (void *)&q->q.desc[q->q.pidx]; | |
1408 | wr->equiq_to_len16 = htonl(wr_mid); | |
1409 | wr->r3 = cpu_to_be64(0); | |
1410 | end = (u64 *)wr + flits; | |
1411 | ||
0034b298 | 1412 | len = immediate ? skb->len : 0; |
a6076fcd | 1413 | len += sizeof(*cpl); |
fd3a4790 | 1414 | if (ssi->gso_size) { |
a6076fcd | 1415 | struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); |
fd3a4790 DM |
1416 | bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; |
1417 | int l3hdr_len = skb_network_header_len(skb); | |
1418 | int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; | |
d0a1299c | 1419 | struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1); |
fd3a4790 | 1420 | |
d0a1299c GG |
1421 | if (tnl_type) |
1422 | len += sizeof(*tnl_lso); | |
7207c0d1 | 1423 | else |
d0a1299c | 1424 | len += sizeof(*lso); |
3ccc6cf7 | 1425 | |
d0a1299c GG |
1426 | wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | |
1427 | FW_WR_IMMDLEN_V(len)); | |
1428 | if (tnl_type) { | |
1429 | struct iphdr *iph = ip_hdr(skb); | |
3ccc6cf7 | 1430 | |
d0a1299c GG |
1431 | t6_fill_tnl_lso(skb, tnl_lso, tnl_type); |
1432 | cpl = (void *)(tnl_lso + 1); | |
1433 | /* Driver is expected to compute partial checksum that | |
1434 | * does not include the IP Total Length. | |
1435 | */ | |
1436 | if (iph->version == 4) { | |
1437 | iph->check = 0; | |
1438 | iph->tot_len = 0; | |
1439 | iph->check = (u16)(~ip_fast_csum((u8 *)iph, | |
1440 | iph->ihl)); | |
1441 | } | |
1442 | if (skb->ip_summed == CHECKSUM_PARTIAL) | |
1443 | cntrl = hwcsum(adap->params.chip, skb); | |
1444 | } else { | |
a6076fcd GG |
1445 | lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | |
1446 | LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F | | |
1447 | LSO_IPV6_V(v6) | | |
1448 | LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | | |
1449 | LSO_IPHDR_LEN_V(l3hdr_len / 4) | | |
1450 | LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); | |
1451 | lso->ipid_ofst = htons(0); | |
1452 | lso->mss = htons(ssi->gso_size); | |
1453 | lso->seqno_offset = htonl(0); | |
d0a1299c | 1454 | if (is_t4(adap->params.chip)) |
a6076fcd | 1455 | lso->len = htonl(skb->len); |
d0a1299c | 1456 | else |
a6076fcd | 1457 | lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len)); |
d0a1299c GG |
1458 | cpl = (void *)(lso + 1); |
1459 | ||
1460 | if (CHELSIO_CHIP_VERSION(adap->params.chip) | |
1461 | <= CHELSIO_T5) | |
1462 | cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); | |
1463 | else | |
1464 | cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); | |
1465 | ||
1466 | cntrl |= TXPKT_CSUM_TYPE_V(v6 ? | |
1467 | TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | | |
1468 | TXPKT_IPHDR_LEN_V(l3hdr_len); | |
1469 | } | |
c50ae55e GG |
1470 | sgl = (u64 *)(cpl + 1); /* SGL starts here */ | |
1471 | if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) { | |
1472 | /* If current position is already at the end of the | |
1473 | * txq, reset the current to point to start of the queue | |
1474 | * and update the end ptr as well. | |
1475 | */ | |
1476 | if (sgl == (u64 *)q->q.stat) { | |
1477 | int left = (u8 *)end - (u8 *)q->q.stat; | |
1478 | ||
1479 | end = (void *)q->q.desc + left; | |
1480 | sgl = (void *)q->q.desc; | |
1481 | } | |
1482 | } | |
fd3a4790 DM |
1483 | q->tso++; |
1484 | q->tx_cso += ssi->gso_segs; | |
1485 | } else { | |
a4569504 AG |
1486 | if (ptp_enabled) |
1487 | op = FW_PTP_TX_PKT_WR; | |
1488 | else | |
1489 | op = FW_ETH_TX_PKT_WR; | |
1490 | wr->op_immdlen = htonl(FW_WR_OP_V(op) | | |
e2ac9628 | 1491 | FW_WR_IMMDLEN_V(len)); |
fd3a4790 | 1492 | cpl = (void *)(wr + 1); |
c50ae55e | 1493 | sgl = (u64 *)(cpl + 1); |
fd3a4790 | 1494 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
3ccc6cf7 HS |
1495 | cntrl = hwcsum(adap->params.chip, skb) | |
1496 | TXPKT_IPCSUM_DIS_F; | |
fd3a4790 | 1497 | q->tx_cso++; |
84a200b3 | 1498 | } |
fd3a4790 DM |
1499 | } |
1500 | ||
df8a39de | 1501 | if (skb_vlan_tag_present(skb)) { |
fd3a4790 | 1502 | q->vlan_ins++; |
1ecc7b7a | 1503 | cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); |
84a200b3 VP |
1504 | #ifdef CONFIG_CHELSIO_T4_FCOE |
1505 | if (skb->protocol == htons(ETH_P_FCOE)) | |
1ecc7b7a | 1506 | cntrl |= TXPKT_VLAN_V( |
84a200b3 VP |
1507 | ((skb->priority & 0x7) << VLAN_PRIO_SHIFT)); |
1508 | #endif /* CONFIG_CHELSIO_T4_FCOE */ | |
fd3a4790 DM |
1509 | } |
1510 | ||
397665da AB |
1511 | ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) | |
1512 | TXPKT_PF_V(adap->pf); | |
a4569504 AG |
1513 | if (ptp_enabled) |
1514 | ctrl0 |= TXPKT_TSTAMP_F; | |
397665da AB |
1515 | #ifdef CONFIG_CHELSIO_T4_DCB |
1516 | if (is_t4(adap->params.chip)) | |
1517 | ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio); | |
1518 | else | |
1519 | ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio); | |
1520 | #endif | |
1521 | cpl->ctrl0 = htonl(ctrl0); | |
fd3a4790 DM |
1522 | cpl->pack = htons(0); |
1523 | cpl->len = htons(skb->len); | |
1524 | cpl->ctrl1 = cpu_to_be64(cntrl); | |
1525 | ||
0034b298 | 1526 | if (immediate) { |
c50ae55e | 1527 | cxgb4_inline_tx_skb(skb, &q->q, sgl); |
a7525198 | 1528 | dev_consume_skb_any(skb); |
fd3a4790 DM |
1529 | } else { |
1530 | int last_desc; | |
1531 | ||
c50ae55e | 1532 | cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0, addr); |
fd3a4790 DM |
1533 | skb_orphan(skb); |
1534 | ||
1535 | last_desc = q->q.pidx + ndesc - 1; | |
1536 | if (last_desc >= q->q.size) | |
1537 | last_desc -= q->q.size; | |
1538 | q->q.sdesc[last_desc].skb = skb; | |
a6076fcd | 1539 | q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl; |
fd3a4790 DM |
1540 | } |
1541 | ||
1542 | txq_advance(&q->q, ndesc); | |
1543 | ||
a6ec572b | 1544 | cxgb4_ring_tx_db(adap, &q->q, ndesc); |
a4569504 AG |
1545 | if (ptp_enabled) |
1546 | spin_unlock(&adap->ptp_lock); | |
fd3a4790 DM |
1547 | return NETDEV_TX_OK; |
1548 | } | |
1549 | ||
d5fbda61 AV |
1550 | /* Constants ... */ |
1551 | enum { | |
1552 | /* Egress Queue sizes, producer and consumer indices are all in units | |
1553 | * of Egress Context Units bytes. Note that as far as the hardware is | |
1554 | * concerned, the free list is an Egress Queue (the host produces free | |
1555 | * buffers which the hardware consumes) and free list entries are | |
1556 | * 64-bit PCI DMA addresses. | |
1557 | */ | |
1558 | EQ_UNIT = SGE_EQ_IDXSIZE, | |
1559 | FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), | |
1560 | TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), | |
1561 | ||
1562 | T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) + | |
1563 | sizeof(struct cpl_tx_pkt_lso_core) + | |
1564 | sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64), | |
1565 | }; | |
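
/* Illustrative arithmetic, not part of the driver: assuming the usual
 * 64-byte SGE_EQ_IDXSIZE (check t4_hw.h for the actual value), EQ_UNIT
 * is 64, so one Egress Queue Unit holds 64 / sizeof(__be64) = 8 free-list
 * pointers or 8 TX descriptor flits, i.e. FL_PER_EQ_UNIT ==
 * TXD_PER_EQ_UNIT == 8.
 */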
1566 | ||
1567 | /** | |
1568 | * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data? | |
1569 | * @skb: the packet | |
1570 | * | |
1571 | * Returns whether an Ethernet packet is small enough to fit completely as | |
1572 | * immediate data. | |
1573 | */ | |
1574 | static inline int t4vf_is_eth_imm(const struct sk_buff *skb) | |
1575 | { | |
1576 | /* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request | |
1577 | * which does not accommodate immediate data. We could dike out all | |
1578 | * of the support code for immediate data but that would tie our hands | |
1579 | * too much if we ever want to enhance the firmware. It would also | |
1580 | * create more differences between the PF and VF Drivers. | |
1581 | */ | |
1582 | return false; | |
1583 | } | |
1584 | ||
1585 | /** | |
1586 | * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR | |
1587 | * @skb: the packet | |
1588 | * | |
1589 | * Returns the number of flits needed for a TX Work Request for the | |
1590 | * given Ethernet packet, including the needed WR and CPL headers. | |
1591 | */ | |
1592 | static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb) | |
1593 | { | |
1594 | unsigned int flits; | |
1595 | ||
1596 | /* If the skb is small enough, we can pump it out as a work request | |
1597 | * with only immediate data. In that case we just have to have the | |
1598 | * TX Packet header plus the skb data in the Work Request. | |
1599 | */ | |
1600 | if (t4vf_is_eth_imm(skb)) | |
1601 | return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), | |
1602 | sizeof(__be64)); | |
1603 | ||
1604 | /* Otherwise, we're going to have to construct a Scatter/Gather List | |
1605 | * of the skb body and fragments. We also include the flits necessary | |
1606 | * for the TX Packet Work Request and CPL. We always have a firmware | |
1607 | * Write Header (incorporated as part of the cpl_tx_pkt_lso and | |
1608 | * cpl_tx_pkt structures), followed by either a TX Packet Write CPL | |
1609 | * message or, if we're doing a Large Send Offload, an LSO CPL message | |
1610 | * with an embedded TX Packet Write CPL message. | |
1611 | */ | |
1612 | flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); | |
1613 | if (skb_shinfo(skb)->gso_size) | |
1614 | flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + | |
1615 | sizeof(struct cpl_tx_pkt_lso_core) + | |
1616 | sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); | |
1617 | else | |
1618 | flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + | |
1619 | sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); | |
1620 | return flits; | |
1621 | } | |
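
/* Worked example (illustrative only): a non-GSO skb with a linear body
 * and two page fragments needs an SGL covering 3 DMA segments, so
 * sgl_len(3) = (3 * 2) / 2 + (2 & 1) + 2 = 5 flits.  Assuming the usual
 * 32-byte fw_eth_tx_pkt_vm_wr and 16-byte cpl_tx_pkt_core (48 / 8 = 6
 * flits of WR/CPL header), the Work Request totals 11 flits, which
 * flits_to_desc() rounds up to 2 TX descriptors of 8 flits each.
 */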
1622 | ||
1623 | /** | |
1624 | * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue | |
1625 | * @skb: the packet | |
1626 | * @dev: the egress net device | |
1627 | * | |
1628 | * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled. | |
1629 | */ | |
1630 | static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, | |
1631 | struct net_device *dev) | |
1632 | { | |
1633 | dma_addr_t addr[MAX_SKB_FRAGS + 1]; | |
1634 | const struct skb_shared_info *ssi; | |
1635 | struct fw_eth_tx_pkt_vm_wr *wr; | |
1636 | int qidx, credits, max_pkt_len; | |
1637 | struct cpl_tx_pkt_core *cpl; | |
1638 | const struct port_info *pi; | |
1639 | unsigned int flits, ndesc; | |
1640 | struct sge_eth_txq *txq; | |
1641 | struct adapter *adapter; | |
1642 | u64 cntrl, *end; | |
1643 | u32 wr_mid; | |
1644 | const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) + | |
1645 | sizeof(wr->ethmacsrc) + | |
1646 | sizeof(wr->ethtype) + | |
1647 | sizeof(wr->vlantci); | |
1648 | ||
1649 | /* The chip minimum packet length is 10 octets but the firmware | |
1650 | * command that we are using requires that we copy the Ethernet header | |
1651 | * (including the VLAN tag) into the Work Request header, so we | |
1652 | * reject anything smaller than that ... | |
1653 | */ | |
1654 | if (unlikely(skb->len < fw_hdr_copy_len)) | |
1655 | goto out_free; | |
1656 | ||
1657 | /* Discard the packet if the length is greater than mtu */ | |
1658 | max_pkt_len = ETH_HLEN + dev->mtu; | |
1659 | if (skb_vlan_tag_present(skb)) | |
1660 | max_pkt_len += VLAN_HLEN; | |
1661 | if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) | |
1662 | goto out_free; | |
1663 | ||
1664 | /* Figure out which TX Queue we're going to use. */ | |
1665 | pi = netdev_priv(dev); | |
1666 | adapter = pi->adapter; | |
1667 | qidx = skb_get_queue_mapping(skb); | |
1668 | WARN_ON(qidx >= pi->nqsets); | |
1669 | txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; | |
1670 | ||
1671 | /* Take this opportunity to reclaim any TX Descriptors whose DMA | |
1672 | * transfers have completed. | |
1673 | */ | |
1674 | cxgb4_reclaim_completed_tx(adapter, &txq->q, true); | |
1675 | ||
1676 | /* Calculate the number of flits and TX Descriptors we're going to | |
1677 | * need along with how many TX Descriptors will be left over after | |
1678 | * we inject our Work Request. | |
1679 | */ | |
1680 | flits = t4vf_calc_tx_flits(skb); | |
1681 | ndesc = flits_to_desc(flits); | |
1682 | credits = txq_avail(&txq->q) - ndesc; | |
1683 | ||
1684 | if (unlikely(credits < 0)) { | |
1685 | /* Not enough room for this packet's Work Request. Stop the | |
1686 | * TX Queue and return a "busy" condition. The queue will get | |
1687 | * started later on when the firmware informs us that space | |
1688 | * has opened up. | |
1689 | */ | |
1690 | eth_txq_stop(txq); | |
1691 | dev_err(adapter->pdev_dev, | |
1692 | "%s: TX ring %u full while queue awake!\n", | |
1693 | dev->name, qidx); | |
1694 | return NETDEV_TX_BUSY; | |
1695 | } | |
1696 | ||
1697 | if (!t4vf_is_eth_imm(skb) && | |
1698 | unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) { | |
1699 | /* We need to map the skb into PCI DMA space (because it can't | |
1700 | * be in-lined directly into the Work Request) and the mapping | |
1701 | * operation failed. Record the error and drop the packet. | |
1702 | */ | |
1703 | txq->mapping_err++; | |
1704 | goto out_free; | |
1705 | } | |
1706 | ||
1707 | wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); | |
1708 | if (unlikely(credits < ETHTXQ_STOP_THRES)) { | |
1709 | /* After we're done injecting the Work Request for this | |
1710 | * packet, we'll be below our "stop threshold" so stop the TX | |
1711 | * Queue now and schedule a request for an SGE Egress Queue | |
1712 | * Update message. The queue will get started later on when | |
1713 | * the firmware processes this Work Request and sends us an | |
1714 | * Egress Queue Status Update message indicating that space | |
1715 | * has opened up. | |
1716 | */ | |
1717 | eth_txq_stop(txq); | |
1718 | wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; | |
1719 | } | |
1720 | ||
1721 | /* Start filling in our Work Request. Note that we do _not_ handle | |
1722 | * the WR Header wrapping around the TX Descriptor Ring. If our | |
1723 | * maximum header size ever exceeds one TX Descriptor, we'll need to | |
1724 | * do something else here. | |
1725 | */ | |
1726 | WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); | |
1727 | wr = (void *)&txq->q.desc[txq->q.pidx]; | |
1728 | wr->equiq_to_len16 = cpu_to_be32(wr_mid); | |
1729 | wr->r3[0] = cpu_to_be32(0); | |
1730 | wr->r3[1] = cpu_to_be32(0); | |
1731 | skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); | |
1732 | end = (u64 *)wr + flits; | |
1733 | ||
1734 | /* If this is a Large Send Offload packet we'll put in an LSO CPL | |
1735 | * message with an encapsulated TX Packet CPL message. Otherwise we | |
1736 | * just use a TX Packet CPL message. | |
1737 | */ | |
1738 | ssi = skb_shinfo(skb); | |
1739 | if (ssi->gso_size) { | |
1740 | struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); | |
1741 | bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; | |
1742 | int l3hdr_len = skb_network_header_len(skb); | |
1743 | int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; | |
1744 | ||
1745 | wr->op_immdlen = | |
1746 | cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | | |
1747 | FW_WR_IMMDLEN_V(sizeof(*lso) + | |
1748 | sizeof(*cpl))); | |
1749 | /* Fill in the LSO CPL message. */ | |
1750 | lso->lso_ctrl = | |
1751 | cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) | | |
1752 | LSO_FIRST_SLICE_F | | |
1753 | LSO_LAST_SLICE_F | | |
1754 | LSO_IPV6_V(v6) | | |
1755 | LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | | |
1756 | LSO_IPHDR_LEN_V(l3hdr_len / 4) | | |
1757 | LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); | |
1758 | lso->ipid_ofst = cpu_to_be16(0); | |
1759 | lso->mss = cpu_to_be16(ssi->gso_size); | |
1760 | lso->seqno_offset = cpu_to_be32(0); | |
1761 | if (is_t4(adapter->params.chip)) | |
1762 | lso->len = cpu_to_be32(skb->len); | |
1763 | else | |
1764 | lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len)); | |
1765 | ||
1766 | /* Set up TX Packet CPL pointer, control word and perform | |
1767 | * accounting. | |
1768 | */ | |
1769 | cpl = (void *)(lso + 1); | |
1770 | ||
1771 | if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) | |
1772 | cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); | |
1773 | else | |
1774 | cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); | |
1775 | ||
1776 | cntrl |= TXPKT_CSUM_TYPE_V(v6 ? | |
1777 | TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | | |
1778 | TXPKT_IPHDR_LEN_V(l3hdr_len); | |
1779 | txq->tso++; | |
1780 | txq->tx_cso += ssi->gso_segs; | |
1781 | } else { | |
1782 | int len; | |
1783 | ||
1784 | len = (t4vf_is_eth_imm(skb) | |
1785 | ? skb->len + sizeof(*cpl) | |
1786 | : sizeof(*cpl)); | |
1787 | wr->op_immdlen = | |
1788 | cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | | |
1789 | FW_WR_IMMDLEN_V(len)); | |
1790 | ||
1791 | /* Set up TX Packet CPL pointer, control word and perform | |
1792 | * accounting. | |
1793 | */ | |
1794 | cpl = (void *)(wr + 1); | |
1795 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
1796 | cntrl = hwcsum(adapter->params.chip, skb) | | |
1797 | TXPKT_IPCSUM_DIS_F; | |
1798 | txq->tx_cso++; | |
1799 | } else { | |
1800 | cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; | |
1801 | } | |
1802 | } | |
1803 | ||
1804 | /* If there's a VLAN tag present, add that to the list of things to | |
1805 | * do in this Work Request. | |
1806 | */ | |
1807 | if (skb_vlan_tag_present(skb)) { | |
1808 | txq->vlan_ins++; | |
1809 | cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); | |
1810 | } | |
1811 | ||
1812 | /* Fill in the TX Packet CPL message header. */ | |
1813 | cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | | |
1814 | TXPKT_INTF_V(pi->port_id) | | |
1815 | TXPKT_PF_V(0)); | |
1816 | cpl->pack = cpu_to_be16(0); | |
1817 | cpl->len = cpu_to_be16(skb->len); | |
1818 | cpl->ctrl1 = cpu_to_be64(cntrl); | |
1819 | ||
1820 | /* Fill in the body of the TX Packet CPL message with either in-lined | |
1821 | * data or a Scatter/Gather List. | |
1822 | */ | |
1823 | if (t4vf_is_eth_imm(skb)) { | |
1824 | /* In-line the packet's data and free the skb since we don't | |
1825 | * need it any longer. | |
1826 | */ | |
1827 | cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1); | |
1828 | dev_consume_skb_any(skb); | |
1829 | } else { | |
1830 | /* Write the skb's Scatter/Gather list into the TX Packet CPL | |
1831 | * message and retain a pointer to the skb so we can free it | |
1832 | * later when its DMA completes. (We store the skb pointer | |
1833 | * in the Software Descriptor corresponding to the last TX | |
1834 | * Descriptor used by the Work Request.) | |
1835 | * | |
1836 | * The retained skb will be freed when the corresponding TX | |
1837 | * Descriptors are reclaimed after their DMAs complete. | |
1838 | * However, this could take quite a while since, in general, | |
1839 | * the hardware is set up to be lazy about sending DMA | |
1840 | * completion notifications to us and we mostly perform TX | |
1841 | * reclaims in the transmit routine. | |
1842 | * | |
1843 | * This is good for performance but means that we rely on new | |
1844 | * TX packets arriving to run the destructors of completed | |
1845 | * packets, which open up space in their sockets' send queues. | |
1846 | * Sometimes we do not get such new packets causing TX to | |
1847 | * stall. A single UDP transmitter is a good example of this | |
1848 | * situation. We have a clean up timer that periodically | |
1849 | * reclaims completed packets but it doesn't run often enough | |
1850 | * (nor do we want it to) to prevent lengthy stalls. A | |
1851 | * solution to this problem is to run the destructor early, | |
1852 | * after the packet is queued but before it's DMAd. A con is | |
1853 | * that we lie to socket memory accounting, but the amount of | |
1854 | * extra memory is reasonable (limited by the number of TX | |
1855 | * descriptors), the packets do actually get freed quickly by | |
1856 | * new packets almost always, and for protocols like TCP that | |
1857 | * wait for ACKs to really free up the data, the extra memory | |
1858 | * is even less. On the positive side we run the destructors | |
1859 | * on the sending CPU rather than on a potentially different | |
1860 | * completing CPU, usually a good thing. | |
1861 | * | |
1862 | * Run the destructor before telling the DMA engine about the | |
1863 | * packet to make sure it doesn't complete and get freed | |
1864 | * prematurely. | |
1865 | */ | |
1866 | struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1); | |
1867 | struct sge_txq *tq = &txq->q; | |
1868 | int last_desc; | |
1869 | ||
1870 | /* If the Work Request header was an exact multiple of our TX | |
1871 | * Descriptor length, then it's possible that the starting SGL | |
1872 | * pointer lines up exactly with the end of our TX Descriptor | |
1873 | * ring. If that's the case, wrap around to the beginning | |
1874 | * here ... | |
1875 | */ | |
1876 | if (unlikely((void *)sgl == (void *)tq->stat)) { | |
1877 | sgl = (void *)tq->desc; | |
1878 | end = (void *)((void *)tq->desc + | |
1879 | ((void *)end - (void *)tq->stat)); | |
1880 | } | |
1881 | ||
1882 | cxgb4_write_sgl(skb, tq, sgl, end, 0, addr); | |
1883 | skb_orphan(skb); | |
1884 | ||
1885 | last_desc = tq->pidx + ndesc - 1; | |
1886 | if (last_desc >= tq->size) | |
1887 | last_desc -= tq->size; | |
1888 | tq->sdesc[last_desc].skb = skb; | |
1889 | tq->sdesc[last_desc].sgl = sgl; | |
1890 | } | |
1891 | ||
1892 | /* Advance our internal TX Queue state, tell the hardware about | |
1893 | * the new TX descriptors and return success. | |
1894 | */ | |
1895 | txq_advance(&txq->q, ndesc); | |
1896 | ||
1897 | cxgb4_ring_tx_db(adapter, &txq->q, ndesc); | |
1898 | return NETDEV_TX_OK; | |
1899 | ||
1900 | out_free: | |
1901 | /* An error of some sort happened. Free the TX skb and tell the | |
1902 | * OS that we've "dealt" with the packet ... | |
1903 | */ | |
1904 | dev_kfree_skb_any(skb); | |
1905 | return NETDEV_TX_OK; | |
1906 | } | |
1907 | ||
1908 | netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
1909 | { | |
1910 | struct port_info *pi = netdev_priv(dev); | |
1911 | ||
1912 | if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM)) | |
1913 | return cxgb4_vf_eth_xmit(skb, dev); | |
1914 | ||
1915 | return cxgb4_eth_xmit(skb, dev); | |
1916 | } | |
1917 | ||
fd3a4790 DM |
1918 | /** |
1919 | * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs | |
1920 | * @q: the SGE control Tx queue | |
1921 | * | |
a6ec572b AG |
1922 | * This is a variant of cxgb4_reclaim_completed_tx() that is used |
1923 | * for Tx queues that send only immediate data (presently just | |
1924 | * the control queues) and thus do not have any sk_buffs to release. | |
fd3a4790 DM |
1925 | */ |
1926 | static inline void reclaim_completed_tx_imm(struct sge_txq *q) | |
1927 | { | |
6aa7de05 | 1928 | int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); |
fd3a4790 DM |
1929 | int reclaim = hw_cidx - q->cidx; |
1930 | ||
1931 | if (reclaim < 0) | |
1932 | reclaim += q->size; | |
1933 | ||
1934 | q->in_use -= reclaim; | |
1935 | q->cidx = hw_cidx; | |
1936 | } | |
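
/* Wrap-around example (illustrative only): with q->size = 128, a software
 * cidx of 120 and a hardware cidx of 2, reclaim = 2 - 120 = -118, which
 * the correction above turns into -118 + 128 = 10 descriptors whose WRs
 * the hardware has consumed since we last looked.
 */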
1937 | ||
1938 | /** | |
1939 | * is_imm - check whether a packet can be sent as immediate data | |
1940 | * @skb: the packet | |
1941 | * | |
1942 | * Returns true if a packet can be sent as a WR with immediate data. | |
1943 | */ | |
1944 | static inline int is_imm(const struct sk_buff *skb) | |
1945 | { | |
1946 | return skb->len <= MAX_CTRL_WR_LEN; | |
1947 | } | |
1948 | ||
1949 | /** | |
1950 | * ctrlq_check_stop - check if a control queue is full and should stop | |
1951 | * @q: the queue | |
1952 | * @wr: most recent WR written to the queue | |
1953 | * | |
1954 | * Check if a control queue has become full and should be stopped. | |
1955 | * We clean up control queue descriptors very lazily, only when we are out. | |
1956 | * If the queue is still full after reclaiming any completed descriptors | |
1957 | * we suspend it and have the last WR wake it up. | |
1958 | */ | |
1959 | static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) | |
1960 | { | |
1961 | reclaim_completed_tx_imm(&q->q); | |
1962 | if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { | |
e2ac9628 | 1963 | wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); |
fd3a4790 DM |
1964 | q->q.stops++; |
1965 | q->full = 1; | |
1966 | } | |
1967 | } | |
1968 | ||
1969 | /** | |
1970 | * ctrl_xmit - send a packet through an SGE control Tx queue | |
1971 | * @q: the control queue | |
1972 | * @skb: the packet | |
1973 | * | |
1974 | * Send a packet through an SGE control Tx queue. Packets sent through | |
1975 | * a control queue must fit entirely as immediate data. | |
1976 | */ | |
1977 | static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb) | |
1978 | { | |
1979 | unsigned int ndesc; | |
1980 | struct fw_wr_hdr *wr; | |
1981 | ||
1982 | if (unlikely(!is_imm(skb))) { | |
1983 | WARN_ON(1); | |
1984 | dev_kfree_skb(skb); | |
1985 | return NET_XMIT_DROP; | |
1986 | } | |
1987 | ||
1988 | ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); | |
1989 | spin_lock(&q->sendq.lock); | |
1990 | ||
1991 | if (unlikely(q->full)) { | |
1992 | skb->priority = ndesc; /* save for restart */ | |
1993 | __skb_queue_tail(&q->sendq, skb); | |
1994 | spin_unlock(&q->sendq.lock); | |
1995 | return NET_XMIT_CN; | |
1996 | } | |
1997 | ||
1998 | wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; | |
a6ec572b | 1999 | cxgb4_inline_tx_skb(skb, &q->q, wr); |
fd3a4790 DM |
2000 | |
2001 | txq_advance(&q->q, ndesc); | |
2002 | if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) | |
2003 | ctrlq_check_stop(q, wr); | |
2004 | ||
a6ec572b | 2005 | cxgb4_ring_tx_db(q->adap, &q->q, ndesc); |
fd3a4790 DM |
2006 | spin_unlock(&q->sendq.lock); |
2007 | ||
2008 | kfree_skb(skb); | |
2009 | return NET_XMIT_SUCCESS; | |
2010 | } | |
2011 | ||
2012 | /** | |
2013 | * restart_ctrlq - restart a suspended control queue | |
2014 | * @data: the control queue to restart | |
2015 | * | |
2016 | * Resumes transmission on a suspended Tx control queue. | |
2017 | */ | |
2018 | static void restart_ctrlq(unsigned long data) | |
2019 | { | |
2020 | struct sk_buff *skb; | |
2021 | unsigned int written = 0; | |
2022 | struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data; | |
2023 | ||
2024 | spin_lock(&q->sendq.lock); | |
2025 | reclaim_completed_tx_imm(&q->q); | |
2026 | BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ | |
2027 | ||
2028 | while ((skb = __skb_dequeue(&q->sendq)) != NULL) { | |
2029 | struct fw_wr_hdr *wr; | |
2030 | unsigned int ndesc = skb->priority; /* previously saved */ | |
2031 | ||
a4011fd4 HS |
2032 | written += ndesc; |
2033 | /* Write descriptors and free skbs outside the lock to limit | |
fd3a4790 DM |
2034 | * wait times. q->full is still set so new skbs will be queued. |
2035 | */ | |
a4011fd4 HS |
2036 | wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; |
2037 | txq_advance(&q->q, ndesc); | |
fd3a4790 DM |
2038 | spin_unlock(&q->sendq.lock); |
2039 | ||
a6ec572b | 2040 | cxgb4_inline_tx_skb(skb, &q->q, wr); |
fd3a4790 DM |
2041 | kfree_skb(skb); |
2042 | ||
fd3a4790 DM |
2043 | if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { |
2044 | unsigned long old = q->q.stops; | |
2045 | ||
2046 | ctrlq_check_stop(q, wr); | |
2047 | if (q->q.stops != old) { /* suspended anew */ | |
2048 | spin_lock(&q->sendq.lock); | |
2049 | goto ringdb; | |
2050 | } | |
2051 | } | |
2052 | if (written > 16) { | |
a6ec572b | 2053 | cxgb4_ring_tx_db(q->adap, &q->q, written); |
fd3a4790 DM |
2054 | written = 0; |
2055 | } | |
2056 | spin_lock(&q->sendq.lock); | |
2057 | } | |
2058 | q->full = 0; | |
a6ec572b AG |
2059 | ringdb: |
2060 | if (written) | |
2061 | cxgb4_ring_tx_db(q->adap, &q->q, written); | |
fd3a4790 DM |
2062 | spin_unlock(&q->sendq.lock); |
2063 | } | |
2064 | ||
2065 | /** | |
2066 | * t4_mgmt_tx - send a management message | |
2067 | * @adap: the adapter | |
2068 | * @skb: the packet containing the management message | |
2069 | * | |
2070 | * Send a management message through control queue 0. | |
2071 | */ | |
2072 | int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) | |
2073 | { | |
2074 | int ret; | |
2075 | ||
2076 | local_bh_disable(); | |
2077 | ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); | |
2078 | local_bh_enable(); | |
2079 | return ret; | |
2080 | } | |
2081 | ||
2082 | /** | |
2083 | * is_ofld_imm - check whether a packet can be sent as immediate data | |
2084 | * @skb: the packet | |
2085 | * | |
2086 | * Returns true if a packet can be sent as an offload WR with immediate | |
2087 | * data. We currently use the same limit as for Ethernet packets. | |
2088 | */ | |
2089 | static inline int is_ofld_imm(const struct sk_buff *skb) | |
2090 | { | |
2f47d580 HJ |
2091 | struct work_request_hdr *req = (struct work_request_hdr *)skb->data; |
2092 | unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi)); | |
2093 | ||
2094 | if (opcode == FW_CRYPTO_LOOKASIDE_WR) | |
2095 | return skb->len <= SGE_MAX_WR_LEN; | |
2096 | else | |
2097 | return skb->len <= MAX_IMM_TX_PKT_LEN; | |
fd3a4790 DM |
2098 | } |
2099 | ||
2100 | /** | |
2101 | * calc_tx_flits_ofld - calculate # of flits for an offload packet | |
2102 | * @skb: the packet | |
2103 | * | |
2104 | * Returns the number of flits needed for the given offload packet. | |
2105 | * These packets are already fully constructed and no additional headers | |
2106 | * will be added. | |
2107 | */ | |
2108 | static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) | |
2109 | { | |
2110 | unsigned int flits, cnt; | |
2111 | ||
2112 | if (is_ofld_imm(skb)) | |
2113 | return DIV_ROUND_UP(skb->len, 8); | |
2114 | ||
2115 | flits = skb_transport_offset(skb) / 8U; /* headers */ | |
2116 | cnt = skb_shinfo(skb)->nr_frags; | |
15dd16c2 | 2117 | if (skb_tail_pointer(skb) != skb_transport_header(skb)) |
fd3a4790 DM |
2118 | cnt++; |
2119 | return flits + sgl_len(cnt); | |
2120 | } | |
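
/* Worked example (illustrative only): an offload WR with 16 bytes of
 * pre-built headers (skb_transport_offset(skb) == 16, i.e. 2 flits), one
 * page fragment, and a non-empty linear tail gives cnt = 2, so
 * sgl_len(2) = (3 * 1) / 2 + (1 & 1) + 2 = 4 flits and a total of 6.
 */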
2121 | ||
2122 | /** | |
2123 | * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion | |
2125 | * @q: the queue to stop | |
2126 | * | |
2127 | * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting | |
2128 | * inability to map packets. A periodic timer attempts to restart | |
2129 | * queues so marked. | |
2130 | */ | |
ab677ff4 | 2131 | static void txq_stop_maperr(struct sge_uld_txq *q) |
fd3a4790 DM |
2132 | { |
2133 | q->mapping_err++; | |
2134 | q->q.stops++; | |
e46dab4d DM |
2135 | set_bit(q->q.cntxt_id - q->adap->sge.egr_start, |
2136 | q->adap->sge.txq_maperr); | |
fd3a4790 DM |
2137 | } |
2138 | ||
2139 | /** | |
2140 | * ofldtxq_stop - stop an offload Tx queue that has become full | |
2141 | * @q: the queue to stop | |
e383f248 | 2142 | * @wr: the Work Request causing the queue to become full |
fd3a4790 DM |
2143 | * |
2144 | * Stops an offload Tx queue that has become full and modifies the packet | |
2145 | * being written to request a wakeup. | |
2146 | */ | |
e383f248 | 2147 | static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr) |
fd3a4790 | 2148 | { |
e2ac9628 | 2149 | wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); |
fd3a4790 DM |
2150 | q->q.stops++; |
2151 | q->full = 1; | |
2152 | } | |
2153 | ||
2154 | /** | |
126fca64 | 2155 | * service_ofldq - service/restart a suspended offload queue |
fd3a4790 DM |
2156 | * @q: the offload queue |
2157 | * | |
126fca64 HS |
2158 | * Services an offload Tx queue by moving packets from its Pending Send |
2159 | * Queue to the Hardware TX ring. The function starts and ends with the | |
2160 | * Send Queue locked, but drops the lock while putting the skb at the | |
2161 | * head of the Send Queue onto the Hardware TX Ring. Dropping the lock | |
2162 | * allows more skbs to be added to the Send Queue by other threads. | |
2163 | * The packet being processed at the head of the Pending Send Queue is | |
2164 | * left on the queue in case we experience DMA Mapping errors, etc. | |
2165 | * and need to give up and restart later. | |
2166 | * | |
2167 | * service_ofldq() can be thought of as a task which opportunistically | |
2168 | * uses other threads' execution contexts. We use the Offload Queue | |
2169 | * boolean "service_ofldq_running" to make sure that only one instance | |
2170 | * is ever running at a time ... | |
fd3a4790 | 2171 | */ |
ab677ff4 | 2172 | static void service_ofldq(struct sge_uld_txq *q) |
fd3a4790 | 2173 | { |
8d0557d2 | 2174 | u64 *pos, *before, *end; |
fd3a4790 DM |
2175 | int credits; |
2176 | struct sk_buff *skb; | |
8d0557d2 HS |
2177 | struct sge_txq *txq; |
2178 | unsigned int left; | |
fd3a4790 DM |
2179 | unsigned int written = 0; |
2180 | unsigned int flits, ndesc; | |
2181 | ||
126fca64 HS |
2182 | /* If another thread is currently in service_ofldq() processing the |
2183 | * Pending Send Queue then there's nothing to do. Otherwise, flag | |
2184 | * that we're doing the work and continue. Examining/modifying | |
2185 | * the Offload Queue boolean "service_ofldq_running" must be done | |
2186 | * while holding the Pending Send Queue Lock. | |
2187 | */ | |
2188 | if (q->service_ofldq_running) | |
2189 | return; | |
2190 | q->service_ofldq_running = true; | |
2191 | ||
fd3a4790 | 2192 | while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { |
126fca64 HS |
2193 | /* We drop the lock while we're working with the skb at the |
2194 | * head of the Pending Send Queue. This allows more skbs to | |
2195 | * be added to the Pending Send Queue while we're working on | |
2196 | * this one. We don't need to lock to guard the TX Ring | |
2197 | * updates because only one thread of execution is ever | |
2198 | * allowed into service_ofldq() at a time. | |
fd3a4790 DM |
2199 | */ |
2200 | spin_unlock(&q->sendq.lock); | |
2201 | ||
a6ec572b | 2202 | cxgb4_reclaim_completed_tx(q->adap, &q->q, false); |
fd3a4790 DM |
2203 | |
2204 | flits = skb->priority; /* previously saved */ | |
2205 | ndesc = flits_to_desc(flits); | |
2206 | credits = txq_avail(&q->q) - ndesc; | |
2207 | BUG_ON(credits < 0); | |
2208 | if (unlikely(credits < TXQ_STOP_THRES)) | |
e383f248 | 2209 | ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data); |
fd3a4790 DM |
2210 | |
2211 | pos = (u64 *)&q->q.desc[q->q.pidx]; | |
2212 | if (is_ofld_imm(skb)) | |
a6ec572b AG |
2213 | cxgb4_inline_tx_skb(skb, &q->q, pos); |
2214 | else if (cxgb4_map_skb(q->adap->pdev_dev, skb, | |
2215 | (dma_addr_t *)skb->head)) { | |
fd3a4790 DM |
2216 | txq_stop_maperr(q); |
2217 | spin_lock(&q->sendq.lock); | |
2218 | break; | |
2219 | } else { | |
2220 | int last_desc, hdr_len = skb_transport_offset(skb); | |
2221 | ||
8d0557d2 HS |
2222 | /* The WR headers may not fit within one descriptor. |
2223 | * So we need to deal with wrap-around here. | |
2224 | */ | |
2225 | before = (u64 *)pos; | |
2226 | end = (u64 *)pos + flits; | |
2227 | txq = &q->q; | |
2228 | pos = (void *)inline_tx_skb_header(skb, &q->q, | |
2229 | (void *)pos, | |
2230 | hdr_len); | |
2231 | if (before > (u64 *)pos) { | |
2232 | left = (u8 *)end - (u8 *)txq->stat; | |
2233 | end = (void *)txq->desc + left; | |
2234 | } | |
2235 | ||
2236 | /* If current position is already at the end of the | |
2237 | * ofld queue, reset the current to point to | |
2238 | * start of the queue and update the end ptr as well. | |
2239 | */ | |
2240 | if (pos == (u64 *)txq->stat) { | |
2241 | left = (u8 *)end - (u8 *)txq->stat; | |
2242 | end = (void *)txq->desc + left; | |
2243 | pos = (void *)txq->desc; | |
2244 | } | |
2245 | ||
a6ec572b AG |
2246 | cxgb4_write_sgl(skb, &q->q, (void *)pos, |
2247 | end, hdr_len, | |
2248 | (dma_addr_t *)skb->head); | |
fd3a4790 DM |
2249 | #ifdef CONFIG_NEED_DMA_MAP_STATE |
2250 | skb->dev = q->adap->port[0]; | |
2251 | skb->destructor = deferred_unmap_destructor; | |
2252 | #endif | |
2253 | last_desc = q->q.pidx + ndesc - 1; | |
2254 | if (last_desc >= q->q.size) | |
2255 | last_desc -= q->q.size; | |
2256 | q->q.sdesc[last_desc].skb = skb; | |
2257 | } | |
2258 | ||
2259 | txq_advance(&q->q, ndesc); | |
2260 | written += ndesc; | |
2261 | if (unlikely(written > 32)) { | |
a6ec572b | 2262 | cxgb4_ring_tx_db(q->adap, &q->q, written); |
fd3a4790 DM |
2263 | written = 0; |
2264 | } | |
2265 | ||
126fca64 HS |
2266 | /* Reacquire the Pending Send Queue Lock so we can unlink the |
2267 | * skb we've just successfully transferred to the TX Ring and | |
2268 | * loop for the next skb which may be at the head of the | |
2269 | * Pending Send Queue. | |
2270 | */ | |
fd3a4790 DM |
2271 | spin_lock(&q->sendq.lock); |
2272 | __skb_unlink(skb, &q->sendq); | |
2273 | if (is_ofld_imm(skb)) | |
2274 | kfree_skb(skb); | |
2275 | } | |
2276 | if (likely(written)) | |
a6ec572b | 2277 | cxgb4_ring_tx_db(q->adap, &q->q, written); |
126fca64 HS |
2278 | |
2279 | /* Indicate that no thread is processing the Pending Send Queue | |
2280 | * currently. | |
2281 | */ | |
2282 | q->service_ofldq_running = false; | |
fd3a4790 DM |
2283 | } |
2284 | ||
2285 | /** | |
2286 | * ofld_xmit - send a packet through an offload queue | |
2287 | * @q: the Tx offload queue | |
2288 | * @skb: the packet | |
2289 | * | |
2290 | * Send an offload packet through an SGE offload queue. | |
2291 | */ | |
ab677ff4 | 2292 | static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb) |
fd3a4790 DM |
2293 | { |
2294 | skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ | |
2295 | spin_lock(&q->sendq.lock); | |
126fca64 HS |
2296 | |
2297 | /* Queue the new skb onto the Offload Queue's Pending Send Queue. If | |
2298 | * that results in this new skb being the only one on the queue, start | |
2299 | * servicing it. If there are other skbs already on the list, then | |
2300 | * either the queue is currently being processed or it's been stopped | |
2301 | * for some reason and it'll be restarted at a later time. Restart | |
2302 | * paths are triggered by events like experiencing a DMA Mapping Error | |
2303 | * or filling the Hardware TX Ring. | |
2304 | */ | |
fd3a4790 DM |
2305 | __skb_queue_tail(&q->sendq, skb); |
2306 | if (q->sendq.qlen == 1) | |
2307 | service_ofldq(q); | |
126fca64 | 2308 | |
fd3a4790 DM |
2309 | spin_unlock(&q->sendq.lock); |
2310 | return NET_XMIT_SUCCESS; | |
2311 | } | |
2312 | ||
2313 | /** | |
2314 | * restart_ofldq - restart a suspended offload queue | |
2315 | * @data: the offload queue to restart | |
2316 | * | |
2317 | * Resumes transmission on a suspended Tx offload queue. | |
2318 | */ | |
2319 | static void restart_ofldq(unsigned long data) | |
2320 | { | |
ab677ff4 | 2321 | struct sge_uld_txq *q = (struct sge_uld_txq *)data; |
fd3a4790 DM |
2322 | |
2323 | spin_lock(&q->sendq.lock); | |
2324 | q->full = 0; /* the queue actually is completely empty now */ | |
2325 | service_ofldq(q); | |
2326 | spin_unlock(&q->sendq.lock); | |
2327 | } | |
2328 | ||
2329 | /** | |
2330 | * skb_txq - return the Tx queue an offload packet should use | |
2331 | * @skb: the packet | |
2332 | * | |
2333 | * Returns the Tx queue an offload packet should use as indicated by bits | |
2334 | * 1-15 in the packet's queue_mapping. | |
2335 | */ | |
2336 | static inline unsigned int skb_txq(const struct sk_buff *skb) | |
2337 | { | |
2338 | return skb->queue_mapping >> 1; | |
2339 | } | |
2340 | ||
2341 | /** | |
2342 | * is_ctrl_pkt - return whether an offload packet is a control packet | |
2343 | * @skb: the packet | |
2344 | * | |
2345 | * Returns whether an offload packet should use an OFLD or a CTRL | |
2346 | * Tx queue as indicated by bit 0 in the packet's queue_mapping. | |
2347 | */ | |
2348 | static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb) | |
2349 | { | |
2350 | return skb->queue_mapping & 1; | |
2351 | } | |
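
/* Encoding sketch (illustrative only, mirroring the two helpers above):
 * a ULD that wants a WR to go out on offload queue 3 sets
 *
 *	skb->queue_mapping = (3 << 1) | 0;	   bits 1-15: queue index
 *
 * while a packet destined for control queue 0 sets
 *
 *	skb->queue_mapping = (0 << 1) | 1;	   bit 0 set: CTRL queue
 *
 * before handing the skb to t4_ofld_send()/cxgb4_ofld_send().  cxgb4.h's
 * set_wr_txq() helper encapsulates this encoding.
 */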
2352 | ||
ab677ff4 HS |
2353 | static inline int uld_send(struct adapter *adap, struct sk_buff *skb, |
2354 | unsigned int tx_uld_type) | |
fd3a4790 | 2355 | { |
ab677ff4 HS |
2356 | struct sge_uld_txq_info *txq_info; |
2357 | struct sge_uld_txq *txq; | |
fd3a4790 DM |
2358 | unsigned int idx = skb_txq(skb); |
2359 | ||
4fe44dd7 KS |
2360 | if (unlikely(is_ctrl_pkt(skb))) { |
2361 | /* Single ctrl queue is a requirement for LE workaround path */ | |
2362 | if (adap->tids.nsftids) | |
2363 | idx = 0; | |
fd3a4790 | 2364 | return ctrl_xmit(&adap->sge.ctrlq[idx], skb); |
4fe44dd7 | 2365 | } |
0d4b729d A |
2366 | |
2367 | txq_info = adap->sge.uld_txq_info[tx_uld_type]; | |
2368 | if (unlikely(!txq_info)) { | |
2369 | WARN_ON(true); | |
2370 | return NET_XMIT_DROP; | |
2371 | } | |
2372 | ||
2373 | txq = &txq_info->uldtxq[idx]; | |
ab677ff4 | 2374 | return ofld_xmit(txq, skb); |
fd3a4790 DM |
2375 | } |
2376 | ||
2377 | /** | |
2378 | * t4_ofld_send - send an offload packet | |
2379 | * @adap: the adapter | |
2380 | * @skb: the packet | |
2381 | * | |
2382 | * Sends an offload packet. We use the packet queue_mapping to select the | |
2383 | * appropriate Tx queue as follows: bit 0 indicates whether the packet | |
2384 | * should be sent as regular or control, bits 1-15 select the queue. | |
2385 | */ | |
2386 | int t4_ofld_send(struct adapter *adap, struct sk_buff *skb) | |
2387 | { | |
2388 | int ret; | |
2389 | ||
2390 | local_bh_disable(); | |
ab677ff4 | 2391 | ret = uld_send(adap, skb, CXGB4_TX_OFLD); |
fd3a4790 DM |
2392 | local_bh_enable(); |
2393 | return ret; | |
2394 | } | |
2395 | ||
2396 | /** | |
2397 | * cxgb4_ofld_send - send an offload packet | |
2398 | * @dev: the net device | |
2399 | * @skb: the packet | |
2400 | * | |
2401 | * Sends an offload packet. This is an exported version of @t4_ofld_send, | |
2402 | * intended for ULDs. | |
2403 | */ | |
2404 | int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb) | |
2405 | { | |
2406 | return t4_ofld_send(netdev2adap(dev), skb); | |
2407 | } | |
2408 | EXPORT_SYMBOL(cxgb4_ofld_send); | |
2409 | ||
e383f248 AG |
2410 | static void *inline_tx_header(const void *src, |
2411 | const struct sge_txq *q, | |
2412 | void *pos, int length) | |
2413 | { | |
2414 | int left = (void *)q->stat - pos; | |
2415 | u64 *p; | |
2416 | ||
2417 | if (likely(length <= left)) { | |
2418 | memcpy(pos, src, length); | |
2419 | pos += length; | |
2420 | } else { | |
2421 | memcpy(pos, src, left); | |
2422 | memcpy(q->desc, src + left, length - left); | |
2423 | pos = (void *)q->desc + (length - left); | |
2424 | } | |
2425 | /* 0-pad to multiple of 16 */ | |
2426 | p = PTR_ALIGN(pos, 8); | |
2427 | if ((uintptr_t)p & 8) { | |
2428 | *p = 0; | |
2429 | return p + 1; | |
2430 | } | |
2431 | return p; | |
2432 | } | |
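
/* Padding example (illustrative only): copying a 24-byte WR that starts on
 * a 16-byte boundary leaves pos 8 bytes past the last 16-byte boundary;
 * PTR_ALIGN() keeps it there (it is already 8-byte aligned) and the
 * (uintptr_t)p & 8 test above then writes one zero flit so the next WR
 * begins on a 16-byte boundary, matching the 16-byte (len16) units in
 * which WR lengths are measured.
 */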
2433 | ||
2434 | /** | |
2435 | * ofld_xmit_direct - copy a WR into offload queue | |
2436 | * @q: the Tx offload queue | |
2437 | * @src: location of WR | |
2438 | * @len: WR length | |
2439 | * | |
2440 | * Copy an immediate WR into an uncontended SGE offload queue. | |
2441 | */ | |
2442 | static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src, | |
2443 | unsigned int len) | |
2444 | { | |
2445 | unsigned int ndesc; | |
2446 | int credits; | |
2447 | u64 *pos; | |
2448 | ||
2449 | /* Use the lower limit as the cut-off */ | |
2450 | if (len > MAX_IMM_OFLD_TX_DATA_WR_LEN) { | |
2451 | WARN_ON(1); | |
2452 | return NET_XMIT_DROP; | |
2453 | } | |
2454 | ||
2455 | /* Don't return NET_XMIT_CN here as the current | |
2456 | * implementation doesn't queue the request | |
2457 | * using an skb when the following conditions are not met | |
2458 | */ | |
2459 | if (!spin_trylock(&q->sendq.lock)) | |
2460 | return NET_XMIT_DROP; | |
2461 | ||
2462 | if (q->full || !skb_queue_empty(&q->sendq) || | |
2463 | q->service_ofldq_running) { | |
2464 | spin_unlock(&q->sendq.lock); | |
2465 | return NET_XMIT_DROP; | |
2466 | } | |
2467 | ndesc = flits_to_desc(DIV_ROUND_UP(len, 8)); | |
2468 | credits = txq_avail(&q->q) - ndesc; | |
2469 | pos = (u64 *)&q->q.desc[q->q.pidx]; | |
2470 | ||
2471 | /* ofldtxq_stop modifies WR header in-situ */ | |
2472 | inline_tx_header(src, &q->q, pos, len); | |
2473 | if (unlikely(credits < TXQ_STOP_THRES)) | |
2474 | ofldtxq_stop(q, (struct fw_wr_hdr *)pos); | |
2475 | txq_advance(&q->q, ndesc); | |
2476 | cxgb4_ring_tx_db(q->adap, &q->q, ndesc); | |
2477 | ||
2478 | spin_unlock(&q->sendq.lock); | |
2479 | return NET_XMIT_SUCCESS; | |
2480 | } | |
2481 | ||
2482 | int cxgb4_immdata_send(struct net_device *dev, unsigned int idx, | |
2483 | const void *src, unsigned int len) | |
2484 | { | |
2485 | struct sge_uld_txq_info *txq_info; | |
2486 | struct sge_uld_txq *txq; | |
2487 | struct adapter *adap; | |
2488 | int ret; | |
2489 | ||
2490 | adap = netdev2adap(dev); | |
2491 | ||
2492 | local_bh_disable(); | |
2493 | txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; | |
2494 | if (unlikely(!txq_info)) { | |
2495 | WARN_ON(true); | |
2496 | local_bh_enable(); | |
2497 | return NET_XMIT_DROP; | |
2498 | } | |
2499 | txq = &txq_info->uldtxq[idx]; | |
2500 | ||
2501 | ret = ofld_xmit_direct(txq, src, len); | |
2502 | local_bh_enable(); | |
2503 | return net_xmit_eval(ret); | |
2504 | } | |
2505 | EXPORT_SYMBOL(cxgb4_immdata_send); | |
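
/* Caller sketch (illustrative only; "wr" is a hypothetical, fully built
 * immediate Work Request in a local buffer):
 *
 *	ret = cxgb4_immdata_send(netdev, qidx, &wr, sizeof(wr));
 *
 * Unlike cxgb4_ofld_send(), this path never queues: if the queue is
 * contended, stopped, or has skbs pending on its send queue, the WR is
 * dropped and the caller must retry or fall back to the skb-based path.
 */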
2506 | ||
ab677ff4 HS |
2507 | /** |
2508 | * t4_crypto_send - send crypto packet | |
2509 | * @adap: the adapter | |
2510 | * @skb: the packet | |
2511 | * | |
2512 | * Sends a crypto packet. We use the packet queue_mapping to select the | |
2513 | * appropriate Tx queue as follows: bit 0 indicates whether the packet | |
2514 | * should be sent as regular or control, bits 1-15 select the queue. | |
2515 | */ | |
2516 | static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb) | |
2517 | { | |
2518 | int ret; | |
2519 | ||
2520 | local_bh_disable(); | |
2521 | ret = uld_send(adap, skb, CXGB4_TX_CRYPTO); | |
2522 | local_bh_enable(); | |
2523 | return ret; | |
2524 | } | |
2525 | ||
2526 | /** | |
2527 | * cxgb4_crypto_send - send crypto packet | |
2528 | * @dev: the net device | |
2529 | * @skb: the packet | |
2530 | * | |
2531 | * Sends a crypto packet. This is an exported version of @t4_crypto_send, | |
2532 | * intended for ULDs. | |
2533 | */ | |
2534 | int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb) | |
2535 | { | |
2536 | return t4_crypto_send(netdev2adap(dev), skb); | |
2537 | } | |
2538 | EXPORT_SYMBOL(cxgb4_crypto_send); | |
2539 | ||
e91b0f24 | 2540 | static inline void copy_frags(struct sk_buff *skb, |
fd3a4790 DM |
2541 | const struct pkt_gl *gl, unsigned int offset) |
2542 | { | |
e91b0f24 | 2543 | int i; |
fd3a4790 DM |
2544 | |
2545 | /* usually there's just one frag */ | |
e91b0f24 IC |
2546 | __skb_fill_page_desc(skb, 0, gl->frags[0].page, |
2547 | gl->frags[0].offset + offset, | |
2548 | gl->frags[0].size - offset); | |
2549 | skb_shinfo(skb)->nr_frags = gl->nfrags; | |
2550 | for (i = 1; i < gl->nfrags; i++) | |
2551 | __skb_fill_page_desc(skb, i, gl->frags[i].page, | |
2552 | gl->frags[i].offset, | |
2553 | gl->frags[i].size); | |
fd3a4790 DM |
2554 | |
2555 | /* get a reference to the last page, we don't own it */ | |
e91b0f24 | 2556 | get_page(gl->frags[gl->nfrags - 1].page); |
fd3a4790 DM |
2557 | } |
2558 | ||
2559 | /** | |
2560 | * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list | |
2561 | * @gl: the gather list | |
2562 | * @skb_len: size of sk_buff main body if it carries fragments | |
2563 | * @pull_len: amount of data to move to the sk_buff's main body | |
2564 | * | |
2565 | * Builds an sk_buff from the given packet gather list. Returns the | |
2566 | * sk_buff or %NULL if sk_buff allocation failed. | |
2567 | */ | |
2568 | struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, | |
2569 | unsigned int skb_len, unsigned int pull_len) | |
2570 | { | |
2571 | struct sk_buff *skb; | |
2572 | ||
2573 | /* | |
2574 | * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer | |
2575 | * size, which is expected since buffers are at least PAGE_SIZEd. | |
2576 | * In this case packets up to RX_COPY_THRES have only one fragment. | |
2577 | */ | |
2578 | if (gl->tot_len <= RX_COPY_THRES) { | |
2579 | skb = dev_alloc_skb(gl->tot_len); | |
2580 | if (unlikely(!skb)) | |
2581 | goto out; | |
2582 | __skb_put(skb, gl->tot_len); | |
2583 | skb_copy_to_linear_data(skb, gl->va, gl->tot_len); | |
2584 | } else { | |
2585 | skb = dev_alloc_skb(skb_len); | |
2586 | if (unlikely(!skb)) | |
2587 | goto out; | |
2588 | __skb_put(skb, pull_len); | |
2589 | skb_copy_to_linear_data(skb, gl->va, pull_len); | |
2590 | ||
e91b0f24 | 2591 | copy_frags(skb, gl, pull_len); |
fd3a4790 DM |
2592 | skb->len = gl->tot_len; |
2593 | skb->data_len = skb->len - pull_len; | |
2594 | skb->truesize += skb->data_len; | |
2595 | } | |
2596 | out: return skb; | |
2597 | } | |
2598 | EXPORT_SYMBOL(cxgb4_pktgl_to_skb); | |
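
/* Usage sketch for a ULD Rx handler (illustrative only): build an skb with
 * a 512-byte linear part and 128 bytes pulled into it, mirroring what
 * t4_ethrx_handler() does below:
 *
 *	skb = cxgb4_pktgl_to_skb(gl, RX_PKT_SKB_LEN, RX_PULL_LEN);
 *	if (unlikely(!skb))
 *		return -ENOMEM;	   caller must also free the gather list
 *
 * Packets up to RX_COPY_THRES come back fully linear; larger ones carry
 * pull_len bytes in the main body and the rest as page fragments.
 */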
2599 | ||
2600 | /** | |
2601 | * t4_pktgl_free - free a packet gather list | |
2602 | * @gl: the gather list | |
2603 | * | |
2604 | * Releases the pages of a packet gather list. We do not own the last | |
2605 | * page on the list and do not free it. | |
2606 | */ | |
de498c89 | 2607 | static void t4_pktgl_free(const struct pkt_gl *gl) |
fd3a4790 DM |
2608 | { |
2609 | int n; | |
e91b0f24 | 2610 | const struct page_frag *p; |
fd3a4790 DM |
2611 | |
2612 | for (p = gl->frags, n = gl->nfrags - 1; n--; p++) | |
2613 | put_page(p->page); | |
2614 | } | |
2615 | ||
2616 | /* | |
2617 | * Process an MPS trace packet. Give it an unused protocol number so it won't | |
2618 | * be delivered to anyone and send it to the stack for capture. | |
2619 | */ | |
2620 | static noinline int handle_trace_pkt(struct adapter *adap, | |
2621 | const struct pkt_gl *gl) | |
2622 | { | |
2623 | struct sk_buff *skb; | |
fd3a4790 DM |
2624 | |
2625 | skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); | |
2626 | if (unlikely(!skb)) { | |
2627 | t4_pktgl_free(gl); | |
2628 | return 0; | |
2629 | } | |
2630 | ||
d14807dd | 2631 | if (is_t4(adap->params.chip)) |
0a57a536 SR |
2632 | __skb_pull(skb, sizeof(struct cpl_trace_pkt)); |
2633 | else | |
2634 | __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt)); | |
2635 | ||
fd3a4790 DM |
2636 | skb_reset_mac_header(skb); |
2637 | skb->protocol = htons(0xffff); | |
2638 | skb->dev = adap->port[0]; | |
2639 | netif_receive_skb(skb); | |
2640 | return 0; | |
2641 | } | |
2642 | ||
5e2a5ebc HS |
2643 | /** |
2644 | * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp | |
2645 | * @adap: the adapter | |
2646 | * @hwtstamps: time stamp structure to update | |
2647 | * @sgetstamp: 60-bit IQE timestamp | |
2648 | * | |
2649 | * Every ingress queue entry carries a 60-bit timestamp in Core Clock | |
2650 | * ticks; convert it into ktime_t and assign it to @hwtstamps | |
2651 | **/ | |
2652 | static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap, | |
2653 | struct skb_shared_hwtstamps *hwtstamps, | |
2654 | u64 sgetstamp) | |
2655 | { | |
2656 | u64 ns; | |
2657 | u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2); | |
2658 | ||
2659 | ns = div_u64(tmp, adap->params.vpd.cclk); | |
2660 | ||
2661 | memset(hwtstamps, 0, sizeof(*hwtstamps)); | |
2662 | hwtstamps->hwtstamp = ns_to_ktime(ns); | |
2663 | } | |
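
/* Worked conversion (illustrative only, assuming vpd.cclk holds the Core
 * Clock in kHz, which is what the arithmetic above implies): with a
 * 250 MHz core clock, cclk = 250000 and each tick is 1000000 / 250000 =
 * 4 ns, so an sgetstamp of 1000 ticks becomes hwtstamp = 4000 ns.
 */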
2664 | ||
fd3a4790 | 2665 | static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, |
c50ae55e | 2666 | const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len) |
fd3a4790 | 2667 | { |
52367a76 VP |
2668 | struct adapter *adapter = rxq->rspq.adap; |
2669 | struct sge *s = &adapter->sge; | |
5e2a5ebc | 2670 | struct port_info *pi; |
fd3a4790 DM |
2671 | int ret; |
2672 | struct sk_buff *skb; | |
2673 | ||
2674 | skb = napi_get_frags(&rxq->rspq.napi); | |
2675 | if (unlikely(!skb)) { | |
2676 | t4_pktgl_free(gl); | |
2677 | rxq->stats.rx_drops++; | |
2678 | return; | |
2679 | } | |
2680 | ||
52367a76 | 2681 | copy_frags(skb, gl, s->pktshift); |
c50ae55e GG |
2682 | if (tnl_hdr_len) |
2683 | skb->csum_level = 1; | |
52367a76 | 2684 | skb->len = gl->tot_len - s->pktshift; |
fd3a4790 DM |
2685 | skb->data_len = skb->len; |
2686 | skb->truesize += skb->data_len; | |
2687 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
2688 | skb_record_rx_queue(skb, rxq->rspq.idx); | |
5e2a5ebc HS |
2689 | pi = netdev_priv(skb->dev); |
2690 | if (pi->rxtstamp) | |
2691 | cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb), | |
2692 | gl->sgetstamp); | |
87b6cf51 | 2693 | if (rxq->rspq.netdev->features & NETIF_F_RXHASH) |
8264989c TH |
2694 | skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, |
2695 | PKT_HASH_TYPE_L3); | |
fd3a4790 DM |
2696 | |
2697 | if (unlikely(pkt->vlan_ex)) { | |
86a9bad3 | 2698 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); |
fd3a4790 | 2699 | rxq->stats.vlan_ex++; |
fd3a4790 DM |
2700 | } |
2701 | ret = napi_gro_frags(&rxq->rspq.napi); | |
19ecae2c | 2702 | if (ret == GRO_HELD) |
fd3a4790 DM |
2703 | rxq->stats.lro_pkts++; |
2704 | else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) | |
2705 | rxq->stats.lro_merged++; | |
2706 | rxq->stats.pkts++; | |
2707 | rxq->stats.rx_cso++; | |
2708 | } | |
2709 | ||
a4569504 AG |
2710 | enum { |
2711 | RX_NON_PTP_PKT = 0, | |
2712 | RX_PTP_PKT_SUC = 1, | |
2713 | RX_PTP_PKT_ERR = 2 | |
2714 | }; | |
2715 | ||
2716 | /** | |
2717 | * t4_systim_to_hwstamp - read hardware time stamp | |
2718 | * @adap: the adapter | |
2719 | * @skb: the packet | |
2720 | * | |
2721 | * Read the Time Stamp from the MPS packet and insert it into the skb, | |
2722 | * which is then forwarded to the PTP application | |
2723 | */ | |
2724 | static noinline int t4_systim_to_hwstamp(struct adapter *adapter, | |
2725 | struct sk_buff *skb) | |
2726 | { | |
2727 | struct skb_shared_hwtstamps *hwtstamps; | |
2728 | struct cpl_rx_mps_pkt *cpl = NULL; | |
2729 | unsigned char *data; | |
2730 | int offset; | |
2731 | ||
2732 | cpl = (struct cpl_rx_mps_pkt *)skb->data; | |
2733 | if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) & | |
2734 | X_CPL_RX_MPS_PKT_TYPE_PTP)) | |
2735 | return RX_PTP_PKT_ERR; | |
2736 | ||
2737 | data = skb->data + sizeof(*cpl); | |
2738 | skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt)); | |
2739 | offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN; | |
2740 | if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short)) | |
2741 | return RX_PTP_PKT_ERR; | |
2742 | ||
2743 | hwtstamps = skb_hwtstamps(skb); | |
2744 | memset(hwtstamps, 0, sizeof(*hwtstamps)); | |
2745 | hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data))); | |
2746 | ||
2747 | return RX_PTP_PKT_SUC; | |
2748 | } | |
2749 | ||
2750 | /** | |
2751 | * t4_rx_hststamp - Recv PTP Event Message | |
2752 | * @adap: the adapter | |
2753 | * @rsp: the response queue descriptor holding the RX_PKT message | |
2754 | * @skb: the packet | |
2755 | * | |
2756 | * PTP enabled and MPS packet, read HW timestamp | |
2757 | */ | |
2758 | static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp, | |
2759 | struct sge_eth_rxq *rxq, struct sk_buff *skb) | |
2760 | { | |
2761 | int ret; | |
2762 | ||
2763 | if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) && | |
2764 | !is_t4(adapter->params.chip))) { | |
2765 | ret = t4_systim_to_hwstamp(adapter, skb); | |
2766 | if (ret == RX_PTP_PKT_ERR) { | |
2767 | kfree_skb(skb); | |
2768 | rxq->stats.rx_drops++; | |
2769 | } | |
2770 | return ret; | |
2771 | } | |
2772 | return RX_NON_PTP_PKT; | |
2773 | } | |
2774 | ||
2775 | /** | |
2776 | * t4_tx_hststamp - Loopback PTP Transmit Event Message | |
2777 | * @adap: the adapter | |
2778 | * @skb: the packet | |
2779 | * @dev: the ingress net device | |
2780 | * | |
2781 | * Read hardware timestamp for the loopback PTP Tx event message | |
2782 | */ | |
2783 | static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb, | |
2784 | struct net_device *dev) | |
2785 | { | |
2786 | struct port_info *pi = netdev_priv(dev); | |
2787 | ||
2788 | if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) { | |
2789 | cxgb4_ptp_read_hwstamp(adapter, pi); | |
2790 | kfree_skb(skb); | |
2791 | return 0; | |
2792 | } | |
2793 | return 1; | |
2794 | } | |
2795 | ||
fd3a4790 DM |
2796 | /** |
2797 | * t4_ethrx_handler - process an ingress ethernet packet | |
2798 | * @q: the response queue that received the packet | |
2799 | * @rsp: the response queue descriptor holding the RX_PKT message | |
2800 | * @si: the gather list of packet fragments | |
2801 | * | |
2802 | * Process an ingress ethernet packet and deliver it to the stack. | |
2803 | */ | |
2804 | int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, | |
2805 | const struct pkt_gl *si) | |
2806 | { | |
2807 | bool csum_ok; | |
2808 | struct sk_buff *skb; | |
fd3a4790 DM |
2809 | const struct cpl_rx_pkt *pkt; |
2810 | struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); | |
a4569504 | 2811 | struct adapter *adapter = q->adap; |
52367a76 | 2812 | struct sge *s = &q->adap->sge; |
d14807dd | 2813 | int cpl_trace_pkt = is_t4(q->adap->params.chip) ? |
0a57a536 | 2814 | CPL_TRACE_PKT : CPL_TRACE_PKT_T5; |
c50ae55e | 2815 | u16 err_vec, tnl_hdr_len = 0; |
84a200b3 | 2816 | struct port_info *pi; |
a4569504 | 2817 | int ret = 0; |
fd3a4790 | 2818 | |
0a57a536 | 2819 | if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) |
fd3a4790 DM |
2820 | return handle_trace_pkt(q->adap, si); |
2821 | ||
87b6cf51 | 2822 | pkt = (const struct cpl_rx_pkt *)rsp; |
8eb9f2f9 | 2823 | /* Compressed error vector is enabled for T6 only */ |
c50ae55e | 2824 | if (q->adap->params.tp.rx_pkt_encap) { |
8eb9f2f9 | 2825 | err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec)); |
c50ae55e GG |
2826 | tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec)); |
2827 | } else { | |
8eb9f2f9 | 2828 | err_vec = be16_to_cpu(pkt->err_vec); |
c50ae55e | 2829 | } |
8eb9f2f9 A |
2830 | |
2831 | csum_ok = pkt->csum_calc && !err_vec && | |
cca2822d | 2832 | (q->netdev->features & NETIF_F_RXCSUM); |
992bea8e GG |
2833 | |
2834 | if (err_vec) | |
2835 | rxq->stats.bad_rx_pkts++; | |
2836 | ||
c50ae55e GG |
2837 | if (((pkt->l2info & htonl(RXF_TCP_F)) || |
2838 | tnl_hdr_len) && | |
fd3a4790 | 2839 | (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { |
c50ae55e | 2840 | do_gro(rxq, si, pkt, tnl_hdr_len); |
fd3a4790 DM |
2841 | return 0; |
2842 | } | |
2843 | ||
2844 | skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN); | |
2845 | if (unlikely(!skb)) { | |
2846 | t4_pktgl_free(si); | |
2847 | rxq->stats.rx_drops++; | |
2848 | return 0; | |
2849 | } | |
a4569504 AG |
2850 | pi = netdev_priv(q->netdev); |
2851 | ||
2852 | /* Handle PTP Event Rx packet */ | |
2853 | if (unlikely(pi->ptp_enable)) { | |
2854 | ret = t4_rx_hststamp(adapter, rsp, rxq, skb); | |
2855 | if (ret == RX_PTP_PKT_ERR) | |
2856 | return 0; | |
2857 | } | |
2858 | if (likely(!ret)) | |
2859 | __skb_pull(skb, s->pktshift); /* remove ethernet header pad */ | |
2860 | ||
2861 | /* Handle the PTP Event Tx Loopback packet */ | |
2862 | if (unlikely(pi->ptp_enable && !ret && | |
2863 | (pkt->l2info & htonl(RXF_UDP_F)) && | |
2864 | cxgb4_ptp_is_ptp_rx(skb))) { | |
2865 | if (!t4_tx_hststamp(adapter, skb, q->netdev)) | |
2866 | return 0; | |
2867 | } | |
fd3a4790 | 2868 | |
fd3a4790 DM |
2869 | skb->protocol = eth_type_trans(skb, q->netdev); |
2870 | skb_record_rx_queue(skb, q->idx); | |
87b6cf51 | 2871 | if (skb->dev->features & NETIF_F_RXHASH) |
8264989c TH |
2872 | skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, |
2873 | PKT_HASH_TYPE_L3); | |
87b6cf51 | 2874 | |
fd3a4790 DM |
2875 | rxq->stats.pkts++; |
2876 | ||
5e2a5ebc HS |
2877 | if (pi->rxtstamp) |
2878 | cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb), | |
2879 | si->sgetstamp); | |
bdc590b9 | 2880 | if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) { |
ba5d3c66 | 2881 | if (!pkt->ip_frag) { |
fd3a4790 | 2882 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
ba5d3c66 | 2883 | rxq->stats.rx_cso++; |
bdc590b9 | 2884 | } else if (pkt->l2info & htonl(RXF_IP_F)) { |
fd3a4790 DM |
2885 | __sum16 c = (__force __sum16)pkt->csum; |
2886 | skb->csum = csum_unfold(c); | |
c50ae55e GG |
2887 | |
2888 | if (tnl_hdr_len) { | |
2889 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
2890 | skb->csum_level = 1; | |
2891 | } else { | |
2892 | skb->ip_summed = CHECKSUM_COMPLETE; | |
2893 | } | |
ba5d3c66 | 2894 | rxq->stats.rx_cso++; |
fd3a4790 | 2895 | } |
84a200b3 | 2896 | } else { |
bc8acf2c | 2897 | skb_checksum_none_assert(skb); |
84a200b3 VP |
2898 | #ifdef CONFIG_CHELSIO_T4_FCOE |
2899 | #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \ | |
2900 | RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F) | |
2901 | ||
84a200b3 VP |
2902 | if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) { |
2903 | if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) && | |
2904 | (pi->fcoe.flags & CXGB_FCOE_ENABLED)) { | |
8eb9f2f9 A |
2905 | if (q->adap->params.tp.rx_pkt_encap) |
2906 | csum_ok = err_vec & | |
2907 | T6_COMPR_RXERR_SUM_F; | |
2908 | else | |
2909 | csum_ok = err_vec & RXERR_CSUM_F; | |
2910 | if (!csum_ok) | |
84a200b3 VP |
2911 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
2912 | } | |
2913 | } | |
2914 | ||
2915 | #undef CPL_RX_PKT_FLAGS | |
2916 | #endif /* CONFIG_CHELSIO_T4_FCOE */ | |
2917 | } | |
fd3a4790 DM |
2918 | |
2919 | if (unlikely(pkt->vlan_ex)) { | |
86a9bad3 | 2920 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); |
fd3a4790 | 2921 | rxq->stats.vlan_ex++; |
19ecae2c | 2922 | } |
3a336cb1 | 2923 | skb_mark_napi_id(skb, &q->napi); |
19ecae2c | 2924 | netif_receive_skb(skb); |
fd3a4790 DM |
2925 | return 0; |
2926 | } | |
2927 | ||
2928 | /** | |
2929 | * restore_rx_bufs - put back a packet's Rx buffers | |
2930 | * @si: the packet gather list | |
2931 | * @q: the SGE free list | |
2932 | * @frags: number of FL buffers to restore | |
2933 | * | |
2934 | * Puts back on an FL the Rx buffers associated with @si. The buffers | |
2935 | * have already been unmapped and are left unmapped; we mark them as | |
2936 | * such to prevent further unmapping attempts. | |
2937 | * | |
2938 | * This function undoes a series of @unmap_rx_buf calls when we find out | |
2939 | * that the current packet can't be processed right away after all and we | |
2940 | * need to come back to it later. This is a very rare event and there's | |
2941 | * no effort to make this particularly efficient. | |
2942 | */ | |
2943 | static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q, | |
2944 | int frags) | |
2945 | { | |
2946 | struct rx_sw_desc *d; | |
2947 | ||
2948 | while (frags--) { | |
2949 | if (q->cidx == 0) | |
2950 | q->cidx = q->size - 1; | |
2951 | else | |
2952 | q->cidx--; | |
2953 | d = &q->sdesc[q->cidx]; | |
2954 | d->page = si->frags[frags].page; | |
2955 | d->dma_addr |= RX_UNMAPPED_BUF; | |
2956 | q->avail++; | |
2957 | } | |
2958 | } | |
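/*
 * Illustrative sketch, not driver code: stepping a circular consumer
 * index backwards, as restore_rx_bufs() does above, must wrap from 0
 * back to size - 1.
 */
static inline unsigned int demo_cidx_prev(unsigned int cidx,
					  unsigned int size)
{
	return cidx ? cidx - 1 : size - 1;
}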
2959 | ||
2960 | /** | |
2961 | * is_new_response - check if a response is newly written | |
2962 | * @r: the response descriptor | |
2963 | * @q: the response queue | |
2964 | * | |
2965 | * Returns true if a response descriptor contains a yet unprocessed | |
2966 | * response. | |
2967 | */ | |
2968 | static inline bool is_new_response(const struct rsp_ctrl *r, | |
2969 | const struct sge_rspq *q) | |
2970 | { | |
1ecc7b7a | 2971 | return (r->type_gen >> RSPD_GEN_S) == q->gen; |
fd3a4790 DM |
2972 | } |
2973 | ||
2974 | /** | |
2975 | * rspq_next - advance to the next entry in a response queue | |
2976 | * @q: the queue | |
2977 | * | |
2978 | * Updates the state of a response queue to advance it to the next entry. | |
2979 | */ | |
2980 | static inline void rspq_next(struct sge_rspq *q) | |
2981 | { | |
2982 | q->cur_desc = (void *)q->cur_desc + q->iqe_len; | |
2983 | if (unlikely(++q->cidx == q->size)) { | |
2984 | q->cidx = 0; | |
2985 | q->gen ^= 1; | |
2986 | q->cur_desc = q->desc; | |
2987 | } | |
2988 | } | |
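/*
 * Illustrative sketch of the generation-bit handshake implemented by
 * is_new_response()/rspq_next() above; the types and names here are
 * invented. The producer stamps each descriptor with the generation of
 * its current pass over the ring, and the consumer flips its expected
 * generation every time the consumer index wraps, so a stale
 * descriptor left over from the previous pass never looks new.
 */
struct demo_ring {
	const unsigned char *gen_of;	/* per-descriptor generation */
	unsigned int size;		/* number of descriptors */
	unsigned int cidx;		/* consumer index */
	unsigned char gen;		/* generation we expect next */
};

static int demo_is_new(const struct demo_ring *r)
{
	return r->gen_of[r->cidx] == r->gen;
}

static void demo_next(struct demo_ring *r)
{
	if (++r->cidx == r->size) {
		r->cidx = 0;
		r->gen ^= 1;	/* wrapped: expect the opposite generation */
	}
}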
2989 | ||
2990 | /** | |
2991 | * process_responses - process responses from an SGE response queue | |
2992 | * @q: the ingress queue to process | |
2993 | * @budget: how many responses can be processed in this round | |
2994 | * | |
2995 | * Process responses from an SGE response queue up to the supplied budget. | |
2996 | * Responses include received packets as well as control messages from FW | |
2997 | * or HW. | |
2998 | * | |
2999 | * Additionally choose the interrupt holdoff time for the next interrupt | |
3000 | * on this queue. If the system is under memory shortage, use a fairly | |
3001 | * long delay to help recovery. | |
3002 | */ | |
3003 | static int process_responses(struct sge_rspq *q, int budget) | |
3004 | { | |
3005 | int ret, rsp_type; | |
3006 | int budget_left = budget; | |
3007 | const struct rsp_ctrl *rc; | |
3008 | struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); | |
52367a76 VP |
3009 | struct adapter *adapter = q->adap; |
3010 | struct sge *s = &adapter->sge; | |
fd3a4790 DM |
3011 | |
3012 | while (likely(budget_left)) { | |
3013 | rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); | |
2337ba42 VP |
3014 | if (!is_new_response(rc, q)) { |
3015 | if (q->flush_handler) | |
3016 | q->flush_handler(q); | |
fd3a4790 | 3017 | break; |
2337ba42 | 3018 | } |
fd3a4790 | 3019 | |
019be1cf | 3020 | dma_rmb(); |
1ecc7b7a HS |
3021 | rsp_type = RSPD_TYPE_G(rc->type_gen); |
3022 | if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) { | |
e91b0f24 | 3023 | struct page_frag *fp; |
fd3a4790 DM |
3024 | struct pkt_gl si; |
3025 | const struct rx_sw_desc *rsd; | |
3026 | u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; | |
3027 | ||
1ecc7b7a | 3028 | if (len & RSPD_NEWBUF_F) { |
fd3a4790 DM |
3029 | if (likely(q->offset > 0)) { |
3030 | free_rx_bufs(q->adap, &rxq->fl, 1); | |
3031 | q->offset = 0; | |
3032 | } | |
1ecc7b7a | 3033 | len = RSPD_LEN_G(len); |
fd3a4790 DM |
3034 | } |
3035 | si.tot_len = len; | |
3036 | ||
3037 | /* gather packet fragments */ | |
3038 | for (frags = 0, fp = si.frags; ; frags++, fp++) { | |
3039 | rsd = &rxq->fl.sdesc[rxq->fl.cidx]; | |
52367a76 | 3040 | bufsz = get_buf_size(adapter, rsd); |
fd3a4790 | 3041 | fp->page = rsd->page; |
e91b0f24 IC |
3042 | fp->offset = q->offset; |
3043 | fp->size = min(bufsz, len); | |
3044 | len -= fp->size; | |
fd3a4790 DM |
3045 | if (!len) |
3046 | break; | |
3047 | unmap_rx_buf(q->adap, &rxq->fl); | |
3048 | } | |
3049 | ||
5e2a5ebc HS |
3050 | si.sgetstamp = SGE_TIMESTAMP_G( |
3051 | be64_to_cpu(rc->last_flit)); | |
fd3a4790 DM |
3052 | /* |
3053 | * Last buffer remains mapped so explicitly make it | |
3054 | * coherent for CPU access. | |
3055 | */ | |
3056 | dma_sync_single_for_cpu(q->adap->pdev_dev, | |
3057 | get_buf_addr(rsd), | |
e91b0f24 | 3058 | fp->size, DMA_FROM_DEVICE); |
fd3a4790 DM |
3059 | |
3060 | si.va = page_address(si.frags[0].page) + | |
e91b0f24 | 3061 | si.frags[0].offset; |
fd3a4790 DM |
3062 | prefetch(si.va); |
3063 | ||
3064 | si.nfrags = frags + 1; | |
3065 | ret = q->handler(q, q->cur_desc, &si); | |
3066 | if (likely(ret == 0)) | |
52367a76 | 3067 | q->offset += ALIGN(fp->size, s->fl_align); |
fd3a4790 DM |
3068 | else |
3069 | restore_rx_bufs(&si, &rxq->fl, frags); | |
1ecc7b7a | 3070 | } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) { |
fd3a4790 DM |
3071 | ret = q->handler(q, q->cur_desc, NULL); |
3072 | } else { | |
3073 | ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); | |
3074 | } | |
3075 | ||
3076 | if (unlikely(ret)) { | |
3077 | /* couldn't process descriptor, back off for recovery */ | |
1ecc7b7a | 3078 | q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX); |
fd3a4790 DM |
3079 | break; |
3080 | } | |
3081 | ||
3082 | rspq_next(q); | |
3083 | budget_left--; | |
3084 | } | |
3085 | ||
da08e425 | 3086 | if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16) |
fd3a4790 DM |
3087 | __refill_fl(q->adap, &rxq->fl); |
3088 | return budget - budget_left; | |
3089 | } | |
3090 | ||
3091 | /** | |
3092 | * napi_rx_handler - the NAPI handler for Rx processing | |
3093 | * @napi: the napi instance | |
3094 | * @budget: how many packets we can process in this round | |
3095 | * | |
3096 | * Handler for new data events when using NAPI. This does not need any | |
3097 | * locking or protection from interrupts as data interrupts are off at | |
3098 | * this point and other adapter interrupts do not interfere (the latter | |
3099 | * is not a concern at all with MSI-X as non-data interrupts then have | |
3100 | * a separate handler). | |
3101 | */ | |
3102 | static int napi_rx_handler(struct napi_struct *napi, int budget) | |
3103 | { | |
3104 | unsigned int params; | |
3105 | struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); | |
3a336cb1 | 3106 | int work_done; |
d63a6dcf | 3107 | u32 val; |
fd3a4790 | 3108 | |
3a336cb1 | 3109 | work_done = process_responses(q, budget); |
fd3a4790 | 3110 | if (likely(work_done < budget)) { |
e553ec3f HS |
3111 | int timer_index; |
3112 | ||
812787b8 | 3113 | napi_complete_done(napi, work_done); |
1ecc7b7a | 3114 | timer_index = QINTR_TIMER_IDX_G(q->next_intr_params); |
e553ec3f HS |
3115 | |
3116 | if (q->adaptive_rx) { | |
3117 | if (work_done > max(timer_pkt_quota[timer_index], | |
3118 | MIN_NAPI_WORK)) | |
3119 | timer_index = (timer_index + 1); | |
3120 | else | |
3121 | timer_index = timer_index - 1; | |
3122 | ||
3123 | timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1); | |
1ecc7b7a HS |
3124 | q->next_intr_params = |
3125 | QINTR_TIMER_IDX_V(timer_index) | | |
3126 | QINTR_CNT_EN_V(0); | |
e553ec3f HS |
3127 | params = q->next_intr_params; |
3128 | } else { | |
3129 | params = q->next_intr_params; | |
3130 | q->next_intr_params = q->intr_params; | |
3131 | } | |
fd3a4790 | 3132 | } else |
1ecc7b7a | 3133 | params = QINTR_TIMER_IDX_V(7); |
fd3a4790 | 3134 | |
f612b815 | 3135 | val = CIDXINC_V(work_done) | SEINTARM_V(params); |
df64e4d3 HS |
3136 | |
3137 | /* If we don't have access to the new User GTS (T5+), use the old | |
3138 | * doorbell mechanism; otherwise use the new BAR2 mechanism. | |
3139 | */ | |
3140 | if (unlikely(q->bar2_addr == NULL)) { | |
f612b815 HS |
3141 | t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), |
3142 | val | INGRESSQID_V((u32)q->cntxt_id)); | |
d63a6dcf | 3143 | } else { |
f612b815 | 3144 | writel(val | INGRESSQID_V(q->bar2_qid), |
df64e4d3 | 3145 | q->bar2_addr + SGE_UDB_GTS); |
d63a6dcf HS |
3146 | wmb(); |
3147 | } | |
fd3a4790 DM |
3148 | return work_done; |
3149 | } | |
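/*
 * Illustrative sketch of the adaptive holdoff policy above, with
 * invented names: a poll that consumes more than the current timer's
 * packet quota steps toward a longer holdoff, a light poll steps
 * shorter, and the index is clamped to the valid hardware timers.
 */
static int demo_adapt_timer(int timer_index, int work_done,
			    const int *pkt_quota, int nr_timers)
{
	if (work_done > pkt_quota[timer_index])
		timer_index += 1;	/* busy: wait longer before the next IRQ */
	else
		timer_index -= 1;	/* quiet: react faster */

	if (timer_index < 0)
		timer_index = 0;
	else if (timer_index > nr_timers - 1)
		timer_index = nr_timers - 1;
	return timer_index;
}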
3150 | ||
3151 | /* | |
3152 | * The MSI-X interrupt handler for an SGE response queue. | |
3153 | */ | |
3154 | irqreturn_t t4_sge_intr_msix(int irq, void *cookie) | |
3155 | { | |
3156 | struct sge_rspq *q = cookie; | |
3157 | ||
3158 | napi_schedule(&q->napi); | |
3159 | return IRQ_HANDLED; | |
3160 | } | |
3161 | ||
3162 | /* | |
3163 | * Process the indirect interrupt entries in the interrupt queue and kick off | |
3164 | * NAPI for each queue that has generated an entry. | |
3165 | */ | |
3166 | static unsigned int process_intrq(struct adapter *adap) | |
3167 | { | |
3168 | unsigned int credits; | |
3169 | const struct rsp_ctrl *rc; | |
3170 | struct sge_rspq *q = &adap->sge.intrq; | |
d63a6dcf | 3171 | u32 val; |
fd3a4790 DM |
3172 | |
3173 | spin_lock(&adap->sge.intrq_lock); | |
3174 | for (credits = 0; ; credits++) { | |
3175 | rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); | |
3176 | if (!is_new_response(rc, q)) | |
3177 | break; | |
3178 | ||
019be1cf | 3179 | dma_rmb(); |
1ecc7b7a | 3180 | if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) { |
fd3a4790 DM |
3181 | unsigned int qid = ntohl(rc->pldbuflen_qid); |
3182 | ||
e46dab4d | 3183 | qid -= adap->sge.ingr_start; |
fd3a4790 DM |
3184 | napi_schedule(&adap->sge.ingr_map[qid]->napi); |
3185 | } | |
3186 | ||
3187 | rspq_next(q); | |
3188 | } | |
3189 | ||
f612b815 | 3190 | val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params); |
df64e4d3 HS |
3191 | |
3192 | /* If we don't have access to the new User GTS (T5+), use the old | |
3193 | * doorbell mechanism; otherwise use the new BAR2 mechanism. | |
3194 | */ | |
3195 | if (unlikely(q->bar2_addr == NULL)) { | |
f612b815 HS |
3196 | t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), |
3197 | val | INGRESSQID_V(q->cntxt_id)); | |
d63a6dcf | 3198 | } else { |
f612b815 | 3199 | writel(val | INGRESSQID_V(q->bar2_qid), |
df64e4d3 | 3200 | q->bar2_addr + SGE_UDB_GTS); |
d63a6dcf HS |
3201 | wmb(); |
3202 | } | |
fd3a4790 DM |
3203 | spin_unlock(&adap->sge.intrq_lock); |
3204 | return credits; | |
3205 | } | |
3206 | ||
3207 | /* | |
3208 | * The MSI interrupt handler, which handles data events from SGE response queues | |
3209 | * as well as error and other async events as they all use the same MSI vector. | |
3210 | */ | |
3211 | static irqreturn_t t4_intr_msi(int irq, void *cookie) | |
3212 | { | |
3213 | struct adapter *adap = cookie; | |
3214 | ||
c3c7b121 HS |
3215 | if (adap->flags & MASTER_PF) |
3216 | t4_slow_intr_handler(adap); | |
fd3a4790 DM |
3217 | process_intrq(adap); |
3218 | return IRQ_HANDLED; | |
3219 | } | |
3220 | ||
3221 | /* | |
3222 | * Interrupt handler for legacy INTx interrupts. | |
3223 | * Handles data events from SGE response queues as well as error and other | |
3224 | * async events as they all use the same interrupt line. | |
3225 | */ | |
3226 | static irqreturn_t t4_intr_intx(int irq, void *cookie) | |
3227 | { | |
3228 | struct adapter *adap = cookie; | |
3229 | ||
f061de42 | 3230 | t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0); |
c3c7b121 HS |
3231 | if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) | |
3232 | process_intrq(adap)) | |
fd3a4790 DM |
3233 | return IRQ_HANDLED; |
3234 | return IRQ_NONE; /* probably shared interrupt */ | |
3235 | } | |
3236 | ||
3237 | /** | |
3238 | * t4_intr_handler - select the top-level interrupt handler | |
3239 | * @adap: the adapter | |
3240 | * | |
3241 | * Selects the top-level interrupt handler based on the type of interrupts | |
3242 | * (MSI-X, MSI, or INTx). | |
3243 | */ | |
3244 | irq_handler_t t4_intr_handler(struct adapter *adap) | |
3245 | { | |
3246 | if (adap->flags & USING_MSIX) | |
3247 | return t4_sge_intr_msix; | |
3248 | if (adap->flags & USING_MSI) | |
3249 | return t4_intr_msi; | |
3250 | return t4_intr_intx; | |
3251 | } | |
3252 | ||
0e23daeb | 3253 | static void sge_rx_timer_cb(struct timer_list *t) |
fd3a4790 DM |
3254 | { |
3255 | unsigned long m; | |
a3bfb617 | 3256 | unsigned int i; |
0e23daeb | 3257 | struct adapter *adap = from_timer(adap, t, sge.rx_timer); |
fd3a4790 DM |
3258 | struct sge *s = &adap->sge; |
3259 | ||
4b8e27a8 | 3260 | for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) |
fd3a4790 DM |
3261 | for (m = s->starving_fl[i]; m; m &= m - 1) { |
3262 | struct sge_eth_rxq *rxq; | |
3263 | unsigned int id = __ffs(m) + i * BITS_PER_LONG; | |
3264 | struct sge_fl *fl = s->egr_map[id]; | |
3265 | ||
3266 | clear_bit(id, s->starving_fl); | |
4e857c58 | 3267 | smp_mb__after_atomic(); |
fd3a4790 | 3268 | |
c098b026 | 3269 | if (fl_starving(adap, fl)) { |
fd3a4790 DM |
3270 | rxq = container_of(fl, struct sge_eth_rxq, fl); |
3271 | if (napi_reschedule(&rxq->rspq.napi)) | |
3272 | fl->starving++; | |
3273 | else | |
3274 | set_bit(id, s->starving_fl); | |
3275 | } | |
3276 | } | |
a3bfb617 HS |
3277 | /* The remainder of the SGE RX Timer Callback routine is dedicated to |
3278 | * global Master PF activities like checking for chip ingress stalls, | |
3279 | * etc. | |
3280 | */ | |
3281 | if (!(adap->flags & MASTER_PF)) | |
3282 | goto done; | |
fd3a4790 | 3283 | |
a3bfb617 | 3284 | t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD); |
fd3a4790 | 3285 | |
a3bfb617 | 3286 | done: |
fd3a4790 DM |
3287 | mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); |
3288 | } | |
3289 | ||
0e23daeb | 3290 | static void sge_tx_timer_cb(struct timer_list *t) |
fd3a4790 DM |
3291 | { |
3292 | unsigned long m; | |
3293 | unsigned int i, budget; | |
0e23daeb | 3294 | struct adapter *adap = from_timer(adap, t, sge.tx_timer); |
fd3a4790 DM |
3295 | struct sge *s = &adap->sge; |
3296 | ||
4b8e27a8 | 3297 | for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) |
fd3a4790 DM |
3298 | for (m = s->txq_maperr[i]; m; m &= m - 1) { |
3299 | unsigned long id = __ffs(m) + i * BITS_PER_LONG; | |
ab677ff4 | 3300 | struct sge_uld_txq *txq = s->egr_map[id]; |
fd3a4790 DM |
3301 | |
3302 | clear_bit(id, s->txq_maperr); | |
3303 | tasklet_schedule(&txq->qresume_tsk); | |
3304 | } | |
3305 | ||
a4569504 AG |
3306 | if (!is_t4(adap->params.chip)) { |
3307 | struct sge_eth_txq *q = &s->ptptxq; | |
3308 | int avail; | |
3309 | ||
3310 | spin_lock(&adap->ptp_lock); | |
3311 | avail = reclaimable(&q->q); | |
3312 | ||
3313 | if (avail) { | |
3314 | free_tx_desc(adap, &q->q, avail, false); | |
3315 | q->q.in_use -= avail; | |
3316 | } | |
3317 | spin_unlock(&adap->ptp_lock); | |
3318 | } | |
3319 | ||
fd3a4790 DM |
3320 | budget = MAX_TIMER_TX_RECLAIM; |
3321 | i = s->ethtxq_rover; | |
3322 | do { | |
3323 | struct sge_eth_txq *q = &s->ethtxq[i]; | |
3324 | ||
3325 | if (q->q.in_use && | |
3326 | time_after_eq(jiffies, q->txq->trans_start + HZ / 100) && | |
3327 | __netif_tx_trylock(q->txq)) { | |
3328 | int avail = reclaimable(&q->q); | |
3329 | ||
3330 | if (avail) { | |
3331 | if (avail > budget) | |
3332 | avail = budget; | |
3333 | ||
3334 | free_tx_desc(adap, &q->q, avail, true); | |
3335 | q->q.in_use -= avail; | |
3336 | budget -= avail; | |
3337 | } | |
3338 | __netif_tx_unlock(q->txq); | |
3339 | } | |
3340 | ||
3341 | if (++i >= s->ethqsets) | |
3342 | i = 0; | |
3343 | } while (budget && i != s->ethtxq_rover); | |
3344 | s->ethtxq_rover = i; | |
3345 | mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2)); | |
3346 | } | |
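/*
 * Illustrative sketch of the budgeted round-robin used by
 * sge_tx_timer_cb() above, with invented names; it assumes reclaim()
 * returns at most the budget it was given. The rover persists across
 * timer ticks so a pass that exhausts its budget resumes at the next
 * queue instead of always favouring queue 0.
 */
static unsigned int demo_rover_walk(unsigned int rover, unsigned int nqueues,
				    unsigned int budget,
				    unsigned int (*reclaim)(unsigned int qidx,
							    unsigned int max))
{
	unsigned int i = rover;

	do {
		budget -= reclaim(i, budget);
		if (++i >= nqueues)
			i = 0;
	} while (budget && i != rover);
	return i;	/* new rover for the next tick */
}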
3347 | ||
d63a6dcf | 3348 | /** |
df64e4d3 HS |
3349 | * bar2_address - return the BAR2 address for an SGE Queue's Registers |
3350 | * @adapter: the adapter | |
3351 | * @qid: the SGE Queue ID | |
3352 | * @qtype: the SGE Queue Type (Egress or Ingress) | |
3353 | * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues | |
d63a6dcf | 3354 | * |
df64e4d3 HS |
3355 | * Returns the BAR2 address for the SGE Queue Registers associated with |
3356 | * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also | |
3357 | * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE | |
3358 | * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID" | |
3359 | * Registers are supported (e.g. the Write Combining Doorbell Buffer). | |
3360 | */ | |
3361 | static void __iomem *bar2_address(struct adapter *adapter, | |
3362 | unsigned int qid, | |
3363 | enum t4_bar2_qtype qtype, | |
3364 | unsigned int *pbar2_qid) | |
3365 | { | |
3366 | u64 bar2_qoffset; | |
3367 | int ret; | |
d63a6dcf | 3368 | |
e0456717 | 3369 | ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0, |
df64e4d3 HS |
3370 | &bar2_qoffset, pbar2_qid); |
3371 | if (ret) | |
3372 | return NULL; | |
d63a6dcf | 3373 | |
df64e4d3 | 3374 | return adapter->bar2 + bar2_qoffset; |
d63a6dcf HS |
3375 | } |
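/*
 * Usage note, drawn from the callers above: a NULL return is treated
 * as "no BAR2 access" -- napi_rx_handler() and process_intrq() fall
 * back to the legacy SGE_PF_GTS doorbell in that case -- so this
 * helper doubles as the capability probe.
 */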
3376 | ||
145ef8a5 HS |
3377 | /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0 |
3378 | * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map | |
3379 | */ | |
fd3a4790 DM |
3380 | int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, |
3381 | struct net_device *dev, int intr_idx, | |
2337ba42 VP |
3382 | struct sge_fl *fl, rspq_handler_t hnd, |
3383 | rspq_flush_handler_t flush_hnd, int cong) | |
fd3a4790 DM |
3384 | { |
3385 | int ret, flsz = 0; | |
3386 | struct fw_iq_cmd c; | |
52367a76 | 3387 | struct sge *s = &adap->sge; |
fd3a4790 | 3388 | struct port_info *pi = netdev_priv(dev); |
b0ba9d5f | 3389 | int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING); |
fd3a4790 DM |
3390 | |
3391 | /* Size needs to be multiple of 16, including status entry. */ | |
3392 | iq->size = roundup(iq->size, 16); | |
3393 | ||
3394 | iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, | |
0ac5b708 HS |
3395 | &iq->phys_addr, NULL, 0, |
3396 | dev_to_node(adap->pdev_dev)); | |
fd3a4790 DM |
3397 | if (!iq->desc) |
3398 | return -ENOMEM; | |
3399 | ||
3400 | memset(&c, 0, sizeof(c)); | |
e2ac9628 HS |
3401 | c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | |
3402 | FW_CMD_WRITE_F | FW_CMD_EXEC_F | | |
b2612722 | 3403 | FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0)); |
6e4b51a6 | 3404 | c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F | |
fd3a4790 | 3405 | FW_LEN16(c)); |
6e4b51a6 HS |
3406 | c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | |
3407 | FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) | | |
1ecc7b7a HS |
3408 | FW_IQ_CMD_IQANDST_V(intr_idx < 0) | |
3409 | FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) | | |
6e4b51a6 | 3410 | FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx : |
fd3a4790 | 3411 | -intr_idx - 1)); |
6e4b51a6 HS |
3412 | c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) | |
3413 | FW_IQ_CMD_IQGTSMODE_F | | |
3414 | FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) | | |
3415 | FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4)); | |
fd3a4790 DM |
3416 | c.iqsize = htons(iq->size); |
3417 | c.iqaddr = cpu_to_be64(iq->phys_addr); | |
145ef8a5 | 3418 | if (cong >= 0) |
8dce04f1 AV |
3419 | c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F | |
3420 | FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC | |
3421 | : FW_IQ_IQTYPE_OFLD)); | |
fd3a4790 DM |
3422 | |
3423 | if (fl) { | |
3ccc6cf7 HS |
3424 | enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip); |
3425 | ||
13432997 HS |
3426 | /* Allocate the ring for the hardware free list (with space |
3427 | * for its status page) along with the associated software | |
3428 | * descriptor ring. The free list size needs to be a multiple | |
3429 | * of the Egress Queue Unit and at least 2 Egress Units larger | |
3430 | * than the SGE's Egress Congrestion Threshold | |
3431 | * (fl_starve_thres - 1). | |
3432 | */ | |
3433 | if (fl->size < s->fl_starve_thres - 1 + 2 * 8) | |
3434 | fl->size = s->fl_starve_thres - 1 + 2 * 8; | |
fd3a4790 DM |
3435 | fl->size = roundup(fl->size, 8); |
3436 | fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), | |
3437 | sizeof(struct rx_sw_desc), &fl->addr, | |
0ac5b708 HS |
3438 | &fl->sdesc, s->stat_len, |
3439 | dev_to_node(adap->pdev_dev)); | |
fd3a4790 DM |
3440 | if (!fl->desc) |
3441 | goto fl_nomem; | |
3442 | ||
52367a76 | 3443 | flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); |
145ef8a5 | 3444 | c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F | |
b0ba9d5f CL |
3445 | FW_IQ_CMD_FL0FETCHRO_V(relaxed) | |
3446 | FW_IQ_CMD_FL0DATARO_V(relaxed) | | |
145ef8a5 HS |
3447 | FW_IQ_CMD_FL0PADEN_F); |
3448 | if (cong >= 0) | |
3449 | c.iqns_to_fl0congen |= | |
3450 | htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) | | |
3451 | FW_IQ_CMD_FL0CONGCIF_F | | |
3452 | FW_IQ_CMD_FL0CONGEN_F); | |
edadad80 HS |
3453 | /* In T6, for egress queue type FL there is internal overhead |
3454 | * of 16B for header going into FLM module. Hence the maximum | |
3455 | * allowed burst size is 448 bytes. For T4/T5, the hardware | |
3456 | * doesn't coalesce fetch requests if more than 64 bytes of | |
3457 | * Free List pointers are provided, so we use a 128-byte Fetch | |
3458 | * Burst Minimum there (T6 implements coalescing so we can use | |
3459 | * the smaller 64-byte value there). | |
3460 | */ | |
1ecc7b7a | 3461 | c.fl0dcaen_to_fl0cidxfthresh = |
edadad80 HS |
3462 | htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ? |
3463 | FETCHBURSTMIN_128B_X : | |
3464 | FETCHBURSTMIN_64B_X) | | |
3ccc6cf7 HS |
3465 | FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ? |
3466 | FETCHBURSTMAX_512B_X : | |
3467 | FETCHBURSTMAX_256B_X)); | |
fd3a4790 DM |
3468 | c.fl0size = htons(flsz); |
3469 | c.fl0addr = cpu_to_be64(fl->addr); | |
3470 | } | |
3471 | ||
b2612722 | 3472 | ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); |
fd3a4790 DM |
3473 | if (ret) |
3474 | goto err; | |
3475 | ||
3476 | netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); | |
3477 | iq->cur_desc = iq->desc; | |
3478 | iq->cidx = 0; | |
3479 | iq->gen = 1; | |
3480 | iq->next_intr_params = iq->intr_params; | |
3481 | iq->cntxt_id = ntohs(c.iqid); | |
3482 | iq->abs_id = ntohs(c.physiqid); | |
df64e4d3 HS |
3483 | iq->bar2_addr = bar2_address(adap, |
3484 | iq->cntxt_id, | |
3485 | T4_BAR2_QTYPE_INGRESS, | |
3486 | &iq->bar2_qid); | |
fd3a4790 | 3487 | iq->size--; /* subtract status entry */ |
fd3a4790 DM |
3488 | iq->netdev = dev; |
3489 | iq->handler = hnd; | |
2337ba42 VP |
3490 | iq->flush_handler = flush_hnd; |
3491 | ||
3492 | memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr)); | |
3493 | skb_queue_head_init(&iq->lro_mgr.lroq); | |
fd3a4790 DM |
3494 | |
3495 | /* set offset to -1 to distinguish ingress queues without FL */ | |
3496 | iq->offset = fl ? 0 : -1; | |
3497 | ||
e46dab4d | 3498 | adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq; |
fd3a4790 DM |
3499 | |
3500 | if (fl) { | |
62718b32 | 3501 | fl->cntxt_id = ntohs(c.fl0id); |
fd3a4790 DM |
3502 | fl->avail = fl->pend_cred = 0; |
3503 | fl->pidx = fl->cidx = 0; | |
3504 | fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; | |
e46dab4d | 3505 | adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; |
d63a6dcf | 3506 | |
df64e4d3 HS |
3507 | /* Note, we must initialize the BAR2 Free List User Doorbell |
3508 | * information before refilling the Free List! | |
d63a6dcf | 3509 | */ |
df64e4d3 HS |
3510 | fl->bar2_addr = bar2_address(adap, |
3511 | fl->cntxt_id, | |
3512 | T4_BAR2_QTYPE_EGRESS, | |
3513 | &fl->bar2_qid); | |
fd3a4790 DM |
3514 | refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); |
3515 | } | |
b8b1ae99 HS |
3516 | |
3517 | /* For T5 and later we attempt to set up the Congestion Manager values | |
3518 | * of the new RX Ethernet Queue. This should really be handled by | |
3519 | * firmware because it's more complex than any host driver wants to | |
3520 | * get involved with, it differs per chip, and what we do here is | |
3521 | * almost certainly wrong. Firmware would get it wrong as well, but | |
3522 | * it would be a lot easier to fix in one place ... For now we do | |
3523 | * something very simple (and hopefully less wrong). | |
3524 | */ | |
3525 | if (!is_t4(adap->params.chip) && cong >= 0) { | |
2216d014 | 3526 | u32 param, val, ch_map = 0; |
b8b1ae99 | 3527 | int i; |
2216d014 | 3528 | u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log; |
b8b1ae99 HS |
3529 | |
3530 | param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | | |
3531 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | | |
3532 | FW_PARAMS_PARAM_YZ_V(iq->cntxt_id)); | |
3533 | if (cong == 0) { | |
3534 | val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X); | |
3535 | } else { | |
3536 | val = | |
3537 | CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X); | |
3538 | for (i = 0; i < 4; i++) { | |
3539 | if (cong & (1 << i)) | |
2216d014 | 3540 | ch_map |= 1 << (i << cng_ch_bits_log); |
b8b1ae99 | 3541 | } |
2216d014 | 3542 | val |= CONMCTXT_CNGCHMAP_V(ch_map); |
b8b1ae99 | 3543 | } |
b2612722 | 3544 | ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, |
b8b1ae99 HS |
3545 | ¶m, &val); |
3546 | if (ret) | |
3547 | dev_warn(adap->pdev_dev, "Failed to set Congestion" | |
3548 | " Manager Context for Ingress Queue %d: %d\n", | |
3549 | iq->cntxt_id, -ret); | |
3550 | } | |
3551 | ||
fd3a4790 DM |
3552 | return 0; |
3553 | ||
3554 | fl_nomem: | |
3555 | ret = -ENOMEM; | |
3556 | err: | |
3557 | if (iq->desc) { | |
3558 | dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, | |
3559 | iq->desc, iq->phys_addr); | |
3560 | iq->desc = NULL; | |
3561 | } | |
3562 | if (fl && fl->desc) { | |
3563 | kfree(fl->sdesc); | |
3564 | fl->sdesc = NULL; | |
3565 | dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), | |
3566 | fl->desc, fl->addr); | |
3567 | fl->desc = NULL; | |
3568 | } | |
3569 | return ret; | |
3570 | } | |
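/*
 * Illustrative sketch of the free-list sizing rule enforced in
 * t4_sge_alloc_rxq() above; the numbers are examples only. The ring is
 * kept at least two 8-descriptor Egress Units above the starvation
 * threshold and rounded up to a whole unit: e.g. with a starvation
 * threshold of 65, the minimum is 65 - 1 + 16 = 80 descriptors.
 */
static unsigned int demo_min_fl_size(unsigned int fl_starve_thres,
				     unsigned int requested)
{
	unsigned int size = fl_starve_thres - 1 + 2 * 8;

	if (requested > size)
		size = requested;
	return (size + 7) & ~7U;	/* roundup(size, 8) */
}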
3571 | ||
3572 | static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) | |
3573 | { | |
22adfe0a | 3574 | q->cntxt_id = id; |
df64e4d3 HS |
3575 | q->bar2_addr = bar2_address(adap, |
3576 | q->cntxt_id, | |
3577 | T4_BAR2_QTYPE_EGRESS, | |
3578 | &q->bar2_qid); | |
fd3a4790 DM |
3579 | q->in_use = 0; |
3580 | q->cidx = q->pidx = 0; | |
3581 | q->stops = q->restarts = 0; | |
3582 | q->stat = (void *)&q->desc[q->size]; | |
3069ee9b | 3583 | spin_lock_init(&q->db_lock); |
e46dab4d | 3584 | adap->sge.egr_map[id - adap->sge.egr_start] = q; |
fd3a4790 DM |
3585 | } |
3586 | ||
3587 | int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, | |
3588 | struct net_device *dev, struct netdev_queue *netdevq, | |
3589 | unsigned int iqid) | |
3590 | { | |
3591 | int ret, nentries; | |
3592 | struct fw_eq_eth_cmd c; | |
52367a76 | 3593 | struct sge *s = &adap->sge; |
fd3a4790 DM |
3594 | struct port_info *pi = netdev_priv(dev); |
3595 | ||
3596 | /* Add status entries */ | |
52367a76 | 3597 | nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); |
fd3a4790 DM |
3598 | |
3599 | txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, | |
3600 | sizeof(struct tx_desc), sizeof(struct tx_sw_desc), | |
52367a76 | 3601 | &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, |
ad6bad3e | 3602 | netdev_queue_numa_node_read(netdevq)); |
fd3a4790 DM |
3603 | if (!txq->q.desc) |
3604 | return -ENOMEM; | |
3605 | ||
3606 | memset(&c, 0, sizeof(c)); | |
e2ac9628 HS |
3607 | c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | |
3608 | FW_CMD_WRITE_F | FW_CMD_EXEC_F | | |
b2612722 | 3609 | FW_EQ_ETH_CMD_PFN_V(adap->pf) | |
6e4b51a6 HS |
3610 | FW_EQ_ETH_CMD_VFN_V(0)); |
3611 | c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F | | |
3612 | FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c)); | |
3613 | c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F | | |
3614 | FW_EQ_ETH_CMD_VIID_V(pi->viid)); | |
1ecc7b7a HS |
3615 | c.fetchszm_to_iqid = |
3616 | htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | | |
3617 | FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | | |
3618 | FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid)); | |
3619 | c.dcaen_to_eqsize = | |
3620 | htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) | | |
3621 | FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | | |
3622 | FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | | |
3623 | FW_EQ_ETH_CMD_EQSIZE_V(nentries)); | |
fd3a4790 DM |
3624 | c.eqaddr = cpu_to_be64(txq->q.phys_addr); |
3625 | ||
b2612722 | 3626 | ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); |
fd3a4790 DM |
3627 | if (ret) { |
3628 | kfree(txq->q.sdesc); | |
3629 | txq->q.sdesc = NULL; | |
3630 | dma_free_coherent(adap->pdev_dev, | |
3631 | nentries * sizeof(struct tx_desc), | |
3632 | txq->q.desc, txq->q.phys_addr); | |
3633 | txq->q.desc = NULL; | |
3634 | return ret; | |
3635 | } | |
3636 | ||
ab677ff4 | 3637 | txq->q.q_type = CXGB4_TXQ_ETH; |
6e4b51a6 | 3638 | init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); |
fd3a4790 DM |
3639 | txq->txq = netdevq; |
3640 | txq->tso = txq->tx_cso = txq->vlan_ins = 0; | |
3641 | txq->mapping_err = 0; | |
3642 | return 0; | |
3643 | } | |
3644 | ||
3645 | int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, | |
3646 | struct net_device *dev, unsigned int iqid, | |
3647 | unsigned int cmplqid) | |
3648 | { | |
3649 | int ret, nentries; | |
3650 | struct fw_eq_ctrl_cmd c; | |
52367a76 | 3651 | struct sge *s = &adap->sge; |
fd3a4790 DM |
3652 | struct port_info *pi = netdev_priv(dev); |
3653 | ||
3654 | /* Add status entries */ | |
52367a76 | 3655 | nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); |
fd3a4790 DM |
3656 | |
3657 | txq->q.desc = alloc_ring(adap->pdev_dev, nentries, | |
3658 | sizeof(struct tx_desc), 0, &txq->q.phys_addr, | |
982b81eb | 3659 | NULL, 0, dev_to_node(adap->pdev_dev)); |
fd3a4790 DM |
3660 | if (!txq->q.desc) |
3661 | return -ENOMEM; | |
3662 | ||
e2ac9628 HS |
3663 | c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F | |
3664 | FW_CMD_WRITE_F | FW_CMD_EXEC_F | | |
b2612722 | 3665 | FW_EQ_CTRL_CMD_PFN_V(adap->pf) | |
6e4b51a6 HS |
3666 | FW_EQ_CTRL_CMD_VFN_V(0)); |
3667 | c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F | | |
3668 | FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c)); | |
3669 | c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid)); | |
fd3a4790 | 3670 | c.physeqid_pkd = htonl(0); |
1ecc7b7a HS |
3671 | c.fetchszm_to_iqid = |
3672 | htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | | |
3673 | FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) | | |
3674 | FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid)); | |
3675 | c.dcaen_to_eqsize = | |
3676 | htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) | | |
3677 | FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | | |
3678 | FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | | |
3679 | FW_EQ_CTRL_CMD_EQSIZE_V(nentries)); | |
fd3a4790 DM |
3680 | c.eqaddr = cpu_to_be64(txq->q.phys_addr); |
3681 | ||
b2612722 | 3682 | ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); |
fd3a4790 DM |
3683 | if (ret) { |
3684 | dma_free_coherent(adap->pdev_dev, | |
3685 | nentries * sizeof(struct tx_desc), | |
3686 | txq->q.desc, txq->q.phys_addr); | |
3687 | txq->q.desc = NULL; | |
3688 | return ret; | |
3689 | } | |
3690 | ||
ab677ff4 | 3691 | txq->q.q_type = CXGB4_TXQ_CTRL; |
6e4b51a6 | 3692 | init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); |
fd3a4790 DM |
3693 | txq->adap = adap; |
3694 | skb_queue_head_init(&txq->sendq); | |
3695 | tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq); | |
3696 | txq->full = 0; | |
3697 | return 0; | |
3698 | } | |
3699 | ||
0fbc81b3 HS |
3700 | int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid, |
3701 | unsigned int cmplqid) | |
3702 | { | |
3703 | u32 param, val; | |
3704 | ||
3705 | param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | | |
3706 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) | | |
3707 | FW_PARAMS_PARAM_YZ_V(eqid)); | |
3708 | val = cmplqid; | |
3709 | return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); | |
3710 | } | |
3711 | ||
ab677ff4 HS |
3712 | int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, |
3713 | struct net_device *dev, unsigned int iqid, | |
3714 | unsigned int uld_type) | |
fd3a4790 DM |
3715 | { |
3716 | int ret, nentries; | |
3717 | struct fw_eq_ofld_cmd c; | |
52367a76 | 3718 | struct sge *s = &adap->sge; |
fd3a4790 | 3719 | struct port_info *pi = netdev_priv(dev); |
ab677ff4 | 3720 | int cmd = FW_EQ_OFLD_CMD; |
fd3a4790 DM |
3721 | |
3722 | /* Add status entries */ | |
52367a76 | 3723 | nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); |
fd3a4790 DM |
3724 | |
3725 | txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, | |
3726 | sizeof(struct tx_desc), sizeof(struct tx_sw_desc), | |
52367a76 | 3727 | &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, |
ad6bad3e | 3728 | NUMA_NO_NODE); |
fd3a4790 DM |
3729 | if (!txq->q.desc) |
3730 | return -ENOMEM; | |
3731 | ||
3732 | memset(&c, 0, sizeof(c)); | |
ab677ff4 HS |
3733 | if (unlikely(uld_type == CXGB4_TX_CRYPTO)) |
3734 | cmd = FW_EQ_CTRL_CMD; | |
3735 | c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F | | |
e2ac9628 | 3736 | FW_CMD_WRITE_F | FW_CMD_EXEC_F | |
b2612722 | 3737 | FW_EQ_OFLD_CMD_PFN_V(adap->pf) | |
6e4b51a6 HS |
3738 | FW_EQ_OFLD_CMD_VFN_V(0)); |
3739 | c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F | | |
3740 | FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c)); | |
1ecc7b7a HS |
3741 | c.fetchszm_to_iqid = |
3742 | htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | | |
3743 | FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) | | |
3744 | FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid)); | |
3745 | c.dcaen_to_eqsize = | |
3746 | htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) | | |
3747 | FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | | |
3748 | FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | | |
3749 | FW_EQ_OFLD_CMD_EQSIZE_V(nentries)); | |
fd3a4790 DM |
3750 | c.eqaddr = cpu_to_be64(txq->q.phys_addr); |
3751 | ||
b2612722 | 3752 | ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); |
fd3a4790 DM |
3753 | if (ret) { |
3754 | kfree(txq->q.sdesc); | |
3755 | txq->q.sdesc = NULL; | |
3756 | dma_free_coherent(adap->pdev_dev, | |
3757 | nentries * sizeof(struct tx_desc), | |
3758 | txq->q.desc, txq->q.phys_addr); | |
3759 | txq->q.desc = NULL; | |
3760 | return ret; | |
3761 | } | |
3762 | ||
ab677ff4 | 3763 | txq->q.q_type = CXGB4_TXQ_ULD; |
6e4b51a6 | 3764 | init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd))); |
fd3a4790 DM |
3765 | txq->adap = adap; |
3766 | skb_queue_head_init(&txq->sendq); | |
3767 | tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq); | |
3768 | txq->full = 0; | |
3769 | txq->mapping_err = 0; | |
3770 | return 0; | |
3771 | } | |
3772 | ||
ab677ff4 | 3773 | void free_txq(struct adapter *adap, struct sge_txq *q) |
fd3a4790 | 3774 | { |
52367a76 VP |
3775 | struct sge *s = &adap->sge; |
3776 | ||
fd3a4790 | 3777 | dma_free_coherent(adap->pdev_dev, |
52367a76 | 3778 | q->size * sizeof(struct tx_desc) + s->stat_len, |
fd3a4790 DM |
3779 | q->desc, q->phys_addr); |
3780 | q->cntxt_id = 0; | |
3781 | q->sdesc = NULL; | |
3782 | q->desc = NULL; | |
3783 | } | |
3784 | ||
94cdb8bb HS |
3785 | void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, |
3786 | struct sge_fl *fl) | |
fd3a4790 | 3787 | { |
52367a76 | 3788 | struct sge *s = &adap->sge; |
fd3a4790 DM |
3789 | unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; |
3790 | ||
e46dab4d | 3791 | adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; |
b2612722 | 3792 | t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP, |
060e0c75 | 3793 | rq->cntxt_id, fl_id, 0xffff); |
fd3a4790 DM |
3794 | dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, |
3795 | rq->desc, rq->phys_addr); | |
3796 | netif_napi_del(&rq->napi); | |
3797 | rq->netdev = NULL; | |
3798 | rq->cntxt_id = rq->abs_id = 0; | |
3799 | rq->desc = NULL; | |
3800 | ||
3801 | if (fl) { | |
3802 | free_rx_bufs(adap, fl, fl->avail); | |
52367a76 | 3803 | dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len, |
fd3a4790 DM |
3804 | fl->desc, fl->addr); |
3805 | kfree(fl->sdesc); | |
3806 | fl->sdesc = NULL; | |
3807 | fl->cntxt_id = 0; | |
3808 | fl->desc = NULL; | |
3809 | } | |
3810 | } | |
3811 | ||
5fa76694 HS |
3812 | /** |
3813 | * t4_free_ofld_rxqs - free a block of consecutive Rx queues | |
3814 | * @adap: the adapter | |
3815 | * @n: number of queues | |
3816 | * @q: pointer to first queue | |
3817 | * | |
3818 | * Release the resources of a consecutive block of offload Rx queues. | |
3819 | */ | |
3820 | void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q) | |
3821 | { | |
3822 | for ( ; n; n--, q++) | |
3823 | if (q->rspq.desc) | |
3824 | free_rspq_fl(adap, &q->rspq, | |
3825 | q->fl.size ? &q->fl : NULL); | |
3826 | } | |
3827 | ||
fd3a4790 DM |
3828 | /** |
3829 | * t4_free_sge_resources - free SGE resources | |
3830 | * @adap: the adapter | |
3831 | * | |
3832 | * Frees resources used by the SGE queue sets. | |
3833 | */ | |
3834 | void t4_free_sge_resources(struct adapter *adap) | |
3835 | { | |
3836 | int i; | |
ebf4dc2b HS |
3837 | struct sge_eth_rxq *eq; |
3838 | struct sge_eth_txq *etq; | |
3839 | ||
3840 | /* stop all Rx queues in order to start them draining */ | |
3841 | for (i = 0; i < adap->sge.ethqsets; i++) { | |
3842 | eq = &adap->sge.ethrxq[i]; | |
3843 | if (eq->rspq.desc) | |
3844 | t4_iq_stop(adap, adap->mbox, adap->pf, 0, | |
3845 | FW_IQ_TYPE_FL_INT_CAP, | |
3846 | eq->rspq.cntxt_id, | |
3847 | eq->fl.size ? eq->fl.cntxt_id : 0xffff, | |
3848 | 0xffff); | |
3849 | } | |
fd3a4790 DM |
3850 | |
3851 | /* clean up Ethernet Tx/Rx queues */ | |
ebf4dc2b HS |
3852 | for (i = 0; i < adap->sge.ethqsets; i++) { |
3853 | eq = &adap->sge.ethrxq[i]; | |
fd3a4790 | 3854 | if (eq->rspq.desc) |
5fa76694 HS |
3855 | free_rspq_fl(adap, &eq->rspq, |
3856 | eq->fl.size ? &eq->fl : NULL); | |
ebf4dc2b HS |
3857 | |
3858 | etq = &adap->sge.ethtxq[i]; | |
fd3a4790 | 3859 | if (etq->q.desc) { |
b2612722 | 3860 | t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, |
060e0c75 | 3861 | etq->q.cntxt_id); |
fbe80776 | 3862 | __netif_tx_lock_bh(etq->txq); |
fd3a4790 | 3863 | free_tx_desc(adap, &etq->q, etq->q.in_use, true); |
fbe80776 | 3864 | __netif_tx_unlock_bh(etq->txq); |
fd3a4790 DM |
3865 | kfree(etq->q.sdesc); |
3866 | free_txq(adap, &etq->q); | |
3867 | } | |
3868 | } | |
3869 | ||
fd3a4790 DM |
3870 | /* clean up control Tx queues */ |
3871 | for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { | |
3872 | struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; | |
3873 | ||
3874 | if (cq->q.desc) { | |
3875 | tasklet_kill(&cq->qresume_tsk); | |
b2612722 | 3876 | t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0, |
060e0c75 | 3877 | cq->q.cntxt_id); |
fd3a4790 DM |
3878 | __skb_queue_purge(&cq->sendq); |
3879 | free_txq(adap, &cq->q); | |
3880 | } | |
3881 | } | |
3882 | ||
3883 | if (adap->sge.fw_evtq.desc) | |
3884 | free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); | |
3885 | ||
3886 | if (adap->sge.intrq.desc) | |
3887 | free_rspq_fl(adap, &adap->sge.intrq, NULL); | |
3888 | ||
a4569504 AG |
3889 | if (!is_t4(adap->params.chip)) { |
3890 | etq = &adap->sge.ptptxq; | |
3891 | if (etq->q.desc) { | |
3892 | t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, | |
3893 | etq->q.cntxt_id); | |
3894 | spin_lock_bh(&adap->ptp_lock); | |
3895 | free_tx_desc(adap, &etq->q, etq->q.in_use, true); | |
3896 | spin_unlock_bh(&adap->ptp_lock); | |
3897 | kfree(etq->q.sdesc); | |
3898 | free_txq(adap, &etq->q); | |
3899 | } | |
3900 | } | |
3901 | ||
fd3a4790 | 3902 | /* clear the reverse egress queue map */ |
4b8e27a8 HS |
3903 | memset(adap->sge.egr_map, 0, |
3904 | adap->sge.egr_sz * sizeof(*adap->sge.egr_map)); | |
fd3a4790 DM |
3905 | } |
3906 | ||
3907 | void t4_sge_start(struct adapter *adap) | |
3908 | { | |
3909 | adap->sge.ethtxq_rover = 0; | |
3910 | mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); | |
3911 | mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); | |
3912 | } | |
3913 | ||
3914 | /** | |
3915 | * t4_sge_stop - disable SGE operation | |
3916 | * @adap: the adapter | |
3917 | * | |
3918 | * Stop tasklets and timers associated with the DMA engine. Note that | |
3919 | * this is effective only if measures have been taken to disable any HW | |
3920 | * events that may restart them. | |
3921 | */ | |
3922 | void t4_sge_stop(struct adapter *adap) | |
3923 | { | |
3924 | int i; | |
3925 | struct sge *s = &adap->sge; | |
3926 | ||
3927 | if (in_interrupt()) /* actions below require waiting */ | |
3928 | return; | |
3929 | ||
3930 | if (s->rx_timer.function) | |
3931 | del_timer_sync(&s->rx_timer); | |
3932 | if (s->tx_timer.function) | |
3933 | del_timer_sync(&s->tx_timer); | |
3934 | ||
ab677ff4 HS |
3935 | if (is_offload(adap)) { |
3936 | struct sge_uld_txq_info *txq_info; | |
3937 | ||
3938 | txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; | |
3939 | if (txq_info) { | |
3940 | struct sge_uld_txq *txq = txq_info->uldtxq; | |
fd3a4790 | 3941 | |
ab677ff4 HS |
3942 | for_each_ofldtxq(&adap->sge, i) { |
3943 | if (txq->q.desc) | |
3944 | tasklet_kill(&txq->qresume_tsk); | |
3945 | } | |
3946 | } | |
fd3a4790 | 3947 | } |
ab677ff4 HS |
3948 | |
3949 | if (is_pci_uld(adap)) { | |
3950 | struct sge_uld_txq_info *txq_info; | |
3951 | ||
3952 | txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; | |
3953 | if (txq_info) { | |
3954 | struct sge_uld_txq *txq = txq_info->uldtxq; | |
3955 | ||
3956 | for_each_ofldtxq(&adap->sge, i) { | |
3957 | if (txq->q.desc) | |
3958 | tasklet_kill(&txq->qresume_tsk); | |
3959 | } | |
3960 | } | |
3961 | } | |
3962 | ||
fd3a4790 DM |
3963 | for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) { |
3964 | struct sge_ctrl_txq *cq = &s->ctrlq[i]; | |
3965 | ||
3966 | if (cq->q.desc) | |
3967 | tasklet_kill(&cq->qresume_tsk); | |
3968 | } | |
3969 | } | |
3970 | ||
3971 | /** | |
06640310 | 3972 | * t4_sge_init_soft - grab core SGE values needed by SGE code |
fd3a4790 DM |
3973 | * @adap: the adapter |
3974 | * | |
06640310 HS |
3975 | * Grab the SGE operating parameters that we depend on in order to | |
3976 | * do our job, and make sure we can live with them. | |
fd3a4790 | 3977 | */ |
52367a76 VP |
3978 | |
3979 | static int t4_sge_init_soft(struct adapter *adap) | |
fd3a4790 DM |
3980 | { |
3981 | struct sge *s = &adap->sge; | |
52367a76 VP |
3982 | u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu; |
3983 | u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5; | |
3984 | u32 ingress_rx_threshold; | |
fd3a4790 | 3985 | |
52367a76 VP |
3986 | /* |
3987 | * Verify that CPL messages are going to the Ingress Queue for | |
3988 | * process_responses() and that only packet data is going to the | |
3989 | * Free Lists. | |
3990 | */ | |
f612b815 HS |
3991 | if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) != |
3992 | RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) { | |
52367a76 VP |
3993 | dev_err(adap->pdev_dev, "bad SGE CPL MODE\n"); |
3994 | return -EINVAL; | |
3995 | } | |
3996 | ||
3997 | /* | |
3998 | * Validate the Host Buffer Register Array indices that we want to | |
3999 | * use ... | |
4000 | * | |
4001 | * XXX Note that we should really read through the Host Buffer Size | |
4002 | * XXX register array and find the indices of the Buffer Sizes which | |
4003 | * XXX meet our needs! | |
4004 | */ | |
4005 | #define READ_FL_BUF(x) \ | |
f612b815 | 4006 | t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32)) |
52367a76 VP |
4007 | |
4008 | fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF); | |
4009 | fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF); | |
4010 | fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF); | |
4011 | fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF); | |
4012 | ||
92ddcc7b KS |
4013 | /* We only bother using the Large Page logic if the Large Page Buffer |
4014 | * is larger than our Page Size Buffer. | |
4015 | */ | |
4016 | if (fl_large_pg <= fl_small_pg) | |
4017 | fl_large_pg = 0; | |
4018 | ||
52367a76 VP |
4019 | #undef READ_FL_BUF |
4020 | ||
92ddcc7b KS |
4021 | /* The Page Size Buffer must be exactly equal to our Page Size and the |
4022 | * Large Page Size Buffer should be 0 (per above) or a power of 2. | |
4023 | */ | |
52367a76 | 4024 | if (fl_small_pg != PAGE_SIZE || |
92ddcc7b | 4025 | (fl_large_pg & (fl_large_pg-1)) != 0) { |
52367a76 VP |
4026 | dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", |
4027 | fl_small_pg, fl_large_pg); | |
4028 | return -EINVAL; | |
4029 | } | |
4030 | if (fl_large_pg) | |
4031 | s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; | |
4032 | ||
4033 | if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) || | |
4034 | fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) { | |
4035 | dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n", | |
4036 | fl_small_mtu, fl_large_mtu); | |
4037 | return -EINVAL; | |
4038 | } | |
4039 | ||
4040 | /* | |
4041 | * Retrieve our RX interrupt holdoff timer values and counter | |
4042 | * threshold values from the SGE parameters. | |
4043 | */ | |
f061de42 HS |
4044 | timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A); |
4045 | timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A); | |
4046 | timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A); | |
52367a76 | 4047 | s->timer_val[0] = core_ticks_to_us(adap, |
f061de42 | 4048 | TIMERVALUE0_G(timer_value_0_and_1)); |
52367a76 | 4049 | s->timer_val[1] = core_ticks_to_us(adap, |
f061de42 | 4050 | TIMERVALUE1_G(timer_value_0_and_1)); |
52367a76 | 4051 | s->timer_val[2] = core_ticks_to_us(adap, |
f061de42 | 4052 | TIMERVALUE2_G(timer_value_2_and_3)); |
52367a76 | 4053 | s->timer_val[3] = core_ticks_to_us(adap, |
f061de42 | 4054 | TIMERVALUE3_G(timer_value_2_and_3)); |
52367a76 | 4055 | s->timer_val[4] = core_ticks_to_us(adap, |
f061de42 | 4056 | TIMERVALUE4_G(timer_value_4_and_5)); |
52367a76 | 4057 | s->timer_val[5] = core_ticks_to_us(adap, |
f061de42 | 4058 | TIMERVALUE5_G(timer_value_4_and_5)); |
52367a76 | 4059 | |
f612b815 HS |
4060 | ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A); |
4061 | s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold); | |
4062 | s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold); | |
4063 | s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold); | |
4064 | s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold); | |
52367a76 VP |
4065 | |
4066 | return 0; | |
4067 | } | |
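/*
 * Note on the power-of-two test used above: x & (x - 1) clears the
 * lowest set bit of x, so the result is zero exactly when x has at
 * most one bit set. Isolated here for illustration:
 */
static int demo_is_pow2_or_zero(unsigned int x)
{
	return (x & (x - 1)) == 0;	/* true for 0, 1, 2, 4, 8, ... */
}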
4068 | ||
06640310 HS |
4069 | /** |
4070 | * t4_sge_init - initialize SGE | |
4071 | * @adap: the adapter | |
4072 | * | |
4073 | * Perform low-level SGE code initialization needed every time after a | |
4074 | * chip reset. | |
4075 | */ | |
52367a76 VP |
4076 | int t4_sge_init(struct adapter *adap) |
4077 | { | |
4078 | struct sge *s = &adap->sge; | |
acac5962 | 4079 | u32 sge_control, sge_conm_ctrl; |
c2b955e0 | 4080 | int ret, egress_threshold; |
52367a76 VP |
4081 | |
4082 | /* | |
4083 | * Ingress Padding Boundary and Egress Status Page Size are set up by | |
4084 | * t4_fixup_host_params(). | |
4085 | */ | |
f612b815 HS |
4086 | sge_control = t4_read_reg(adap, SGE_CONTROL_A); |
4087 | s->pktshift = PKTSHIFT_G(sge_control); | |
4088 | s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64; | |
ce8f407a | 4089 | |
acac5962 | 4090 | s->fl_align = t4_fl_pkt_align(adap); |
06640310 | 4091 | ret = t4_sge_init_soft(adap); |
52367a76 VP |
4092 | if (ret < 0) |
4093 | return ret; | |
4094 | ||
4095 | /* | |
4096 | * A FL with <= fl_starve_thres buffers is starving and a periodic | |
4097 | * timer will attempt to refill it. This needs to be larger than the | |
4098 | * SGE's Egress Congestion Threshold. If it isn't, then we can get | |
4099 | * stuck waiting for new packets while the SGE is waiting for us to | |
4100 | * give it more Free List entries. (Note that the SGE's Egress | |
c2b955e0 KS |
4101 | * Congestion Threshold is in units of 2 Free List pointers.) For T4, |
4102 | * there was only a single field to control this. For T5 there's the | |
4103 | * original field which now only applies to Unpacked Mode Free List | |
4104 | * buffers and a new field which only applies to Packed Mode Free List | |
4105 | * buffers. | |
52367a76 | 4106 | */ |
f612b815 | 4107 | sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A); |
676d6a75 HS |
4108 | switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { |
4109 | case CHELSIO_T4: | |
f612b815 | 4110 | egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl); |
676d6a75 HS |
4111 | break; |
4112 | case CHELSIO_T5: | |
f612b815 | 4113 | egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl); |
676d6a75 HS |
4114 | break; |
4115 | case CHELSIO_T6: | |
4116 | egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl); | |
4117 | break; | |
4118 | default: | |
4119 | dev_err(adap->pdev_dev, "Unsupported Chip version %d\n", | |
4120 | CHELSIO_CHIP_VERSION(adap->params.chip)); | |
4121 | return -EINVAL; | |
4122 | } | |
c2b955e0 | 4123 | s->fl_starve_thres = 2 * egress_threshold + 1; | |
52367a76 | 4124 | |
a3bfb617 HS |
4125 | t4_idma_monitor_init(adap, &s->idma_monitor); |
4126 | ||
1ecc7b7a HS |
4127 | /* Set up timers used for recurring callbacks to process RX and TX | |
4128 | * administrative tasks. | |
4129 | */ | |
0e23daeb KC |
4130 | timer_setup(&s->rx_timer, sge_rx_timer_cb, 0); |
4131 | timer_setup(&s->tx_timer, sge_tx_timer_cb, 0); | |
a3bfb617 | 4132 | |
fd3a4790 | 4133 | spin_lock_init(&s->intrq_lock); |
52367a76 VP |
4134 | |
4135 | return 0; | |
fd3a4790 | 4136 | } |