/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "workarounds.h"
#include "ef10_regs.h"

#ifdef EFX_USE_PIO

#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;

#endif /* EFX_USE_PIO */

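/* Per-queue copy buffers: each insert index maps to a
 * 2^EFX_TX_CB_ORDER-byte slot inside a page allocated on demand from
 * tx_queue->cb_page.  The high bits of the index
 * (index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)) pick the page and the low
 * bits pick the slot; for example, assuming 4 KiB pages and
 * EFX_TX_CB_ORDER == 7, each page would hold 32 slots of 128 bytes.
 */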
static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
                                         struct efx_tx_buffer *buffer)
{
        unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
        struct efx_buffer *page_buf =
                &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
        unsigned int offset =
                ((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

        if (unlikely(!page_buf->addr) &&
            efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
                                 GFP_ATOMIC))
                return NULL;
        buffer->dma_addr = page_buf->dma_addr + offset;
        buffer->unmap_len = 0;
        return (u8 *)page_buf->addr + offset;
}

u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
                                   struct efx_tx_buffer *buffer, size_t len)
{
        if (len > EFX_TX_CB_SIZE)
                return NULL;
        return efx_tx_get_copy_buffer(tx_queue, buffer);
}

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                               struct efx_tx_buffer *buffer,
                               unsigned int *pkts_compl,
                               unsigned int *bytes_compl)
{
        if (buffer->unmap_len) {
                struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
                dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
                if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
                                       DMA_TO_DEVICE);
                buffer->unmap_len = 0;
        }

        if (buffer->flags & EFX_TX_BUF_SKB) {
                EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
                (*pkts_compl)++;
                (*bytes_compl) += buffer->skb->len;
                dev_consume_skb_any((struct sk_buff *)buffer->skb);
                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
                           "TX queue %d transmission id %x complete\n",
                           tx_queue->queue, tx_queue->read_count);
        }

        buffer->len = 0;
        buffer->flags = 0;
}

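/* Worst-case descriptor count per skb, illustrated with assumed values
 * (EFX_TSO_MAX_SEGS == 100, MAX_SKB_FRAGS == 17, neither guaranteed
 * here): the baseline below would be 100 * 2 + 17 = 217 descriptors,
 * EF10 option descriptors add up to another 100, and page-boundary
 * splits may add more when PAGE_SIZE exceeds EFX_PAGE_SIZE.
 */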
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
        /* Header and payload descriptor for each output segment, plus
         * one for every input fragment boundary within a segment
         */
        unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

        /* Possibly one more per segment for option descriptors */
        if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
                max_descs += EFX_TSO_MAX_SEGS;

        /* Possibly more for PCIe page boundaries within input fragments */
        if (PAGE_SIZE > EFX_PAGE_SIZE)
                max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
                                   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

        return max_descs;
}

static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
        /* We need to consider both queues that the net core sees as one */
        struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
        struct efx_nic *efx = txq1->efx;
        unsigned int fill_level;

        fill_level = max(txq1->insert_count - txq1->old_read_count,
                         txq2->insert_count - txq2->old_read_count);
        if (likely(fill_level < efx->txq_stop_thresh))
                return;

        /* We used the stale old_read_count above, which gives us a
         * pessimistic estimate of the fill level (which may even
         * validly be >= efx->txq_entries).  Now try again using
         * read_count (more likely to be a cache miss).
         *
         * If we read read_count and then conditionally stop the
         * queue, it is possible for the completion path to race with
         * us and complete all outstanding descriptors in the middle,
         * after which there will be no more completions to wake it.
         * Therefore we stop the queue first, then read read_count
         * (with a memory barrier to ensure the ordering), then
         * restart the queue if the fill level turns out to be low
         * enough.
         */
        netif_tx_stop_queue(txq1->core_txq);
        smp_mb();
        txq1->old_read_count = READ_ONCE(txq1->read_count);
        txq2->old_read_count = READ_ONCE(txq2->read_count);

        fill_level = max(txq1->insert_count - txq1->old_read_count,
                         txq2->insert_count - txq2->old_read_count);
        EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
        if (likely(fill_level < efx->txq_stop_thresh)) {
                smp_mb();
                if (likely(!efx->loopback_selftest))
                        netif_tx_start_queue(txq1->core_txq);
        }
}

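/* Copybreak path: the whole skb (at most EFX_TX_CB_SIZE bytes) is
 * copied into a per-queue copy buffer, so a single descriptor is used
 * and no DMA mapping of the skb itself is required.
 */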
static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
                                struct sk_buff *skb)
{
        unsigned int copy_len = skb->len;
        struct efx_tx_buffer *buffer;
        u8 *copy_buffer;
        int rc;

        EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);

        buffer = efx_tx_queue_get_insert_buffer(tx_queue);

        copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
        if (unlikely(!copy_buffer))
                return -ENOMEM;

        rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
        EFX_WARN_ON_PARANOID(rc);
        buffer->len = copy_len;

        buffer->skb = skb;
        buffer->flags = EFX_TX_BUF_SKB;

        ++tx_queue->insert_count;
        return rc;
}

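/* PIO transmit path: on NICs that provide PIO buffers (the EF10 option
 * descriptor is used below), short packets are written straight into a
 * NIC buffer over write-combining MMIO instead of being DMA mapped.
 * Writes must be dword aligned and are padded out to whole cache
 * lines, hence the small bounce buffer that follows.
 */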
#ifdef EFX_USE_PIO

struct efx_short_copy_buffer {
        int used;
        u8 buf[L1_CACHE_BYTES];
};

/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
                                    u8 *data, int len,
                                    struct efx_short_copy_buffer *copy_buf)
{
        int block_len = len & ~(sizeof(copy_buf->buf) - 1);

        __iowrite64_copy(*piobuf, data, block_len >> 3);
        *piobuf += block_len;
        len -= block_len;

        if (len) {
                data += block_len;
                BUG_ON(copy_buf->used);
                BUG_ON(len > sizeof(copy_buf->buf));
                memcpy(copy_buf->buf, data, len);
                copy_buf->used = len;
        }
}

/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
                                       u8 *data, int len,
                                       struct efx_short_copy_buffer *copy_buf)
{
        if (copy_buf->used) {
                /* if the copy buffer is partially full, fill it up and write */
                int copy_to_buf =
                        min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);

                memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
                copy_buf->used += copy_to_buf;

                /* if we didn't fill it up then we're done for now */
                if (copy_buf->used < sizeof(copy_buf->buf))
                        return;

                __iowrite64_copy(*piobuf, copy_buf->buf,
                                 sizeof(copy_buf->buf) >> 3);
                *piobuf += sizeof(copy_buf->buf);
                data += copy_to_buf;
                len -= copy_to_buf;
                copy_buf->used = 0;
        }

        efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
}

static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
                                  struct efx_short_copy_buffer *copy_buf)
{
        /* if there's anything in it, write the whole buffer, including junk */
        if (copy_buf->used)
                __iowrite64_copy(piobuf, copy_buf->buf,
                                 sizeof(copy_buf->buf) >> 3);
}

/* Traverse skb structure and copy fragments into the PIO buffer.
 * Advances piobuf pointer.
 */
static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
                                     u8 __iomem **piobuf,
                                     struct efx_short_copy_buffer *copy_buf)
{
        int i;

        efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
                                copy_buf);

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
                u8 *vaddr;

                vaddr = kmap_atomic(skb_frag_page(f));

                efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
                                           skb_frag_size(f), copy_buf);
                kunmap_atomic(vaddr);
        }

        EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
}

static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb)
{
        struct efx_tx_buffer *buffer =
                efx_tx_queue_get_insert_buffer(tx_queue);
        u8 __iomem *piobuf = tx_queue->piobuf;

        /* Copy to PIO buffer. Ensure the writes are padded to the end
         * of a cache line, as this is required for write-combining to be
         * effective on at least x86.
         */

        if (skb_shinfo(skb)->nr_frags) {
                /* The size of the copy buffer will ensure all writes
                 * are the size of a cache line.
                 */
                struct efx_short_copy_buffer copy_buf;

                copy_buf.used = 0;

                efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
                                         &piobuf, &copy_buf);
                efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
        } else {
                /* Pad the write to the size of a cache line.
                 * We can do this because we know the skb_shared_info struct is
                 * after the source, and the destination buffer is big enough.
                 */
                BUILD_BUG_ON(L1_CACHE_BYTES >
                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
                __iowrite64_copy(tx_queue->piobuf, skb->data,
                                 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
        }

        buffer->skb = skb;
        buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;

        EFX_POPULATE_QWORD_5(buffer->option,
                             ESF_DZ_TX_DESC_IS_OPT, 1,
                             ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
                             ESF_DZ_TX_PIO_CONT, 0,
                             ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
                             ESF_DZ_TX_PIO_BUF_ADDR,
                             tx_queue->piobuf_offset);
        ++tx_queue->insert_count;
        return 0;
}
#endif /* EFX_USE_PIO */

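/* DMA-mapping helpers: efx_tx_map_chunk() splits one contiguous DMA
 * range into as many descriptors as the NIC's per-descriptor length
 * limit (nic_type->tx_limit_len) requires, and efx_tx_map_data() walks
 * the skb head and fragments, mapping each one and recording unmap
 * information on the final descriptor of every mapping.
 */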
static struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
                                              dma_addr_t dma_addr,
                                              size_t len)
{
        const struct efx_nic_type *nic_type = tx_queue->efx->type;
        struct efx_tx_buffer *buffer;
        unsigned int dma_len;

        /* Map the fragment taking account of NIC-dependent DMA limits. */
        do {
                buffer = efx_tx_queue_get_insert_buffer(tx_queue);
                dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);

                buffer->len = dma_len;
                buffer->dma_addr = dma_addr;
                buffer->flags = EFX_TX_BUF_CONT;
                len -= dma_len;
                dma_addr += dma_len;
                ++tx_queue->insert_count;
        } while (len);

        return buffer;
}

/* Map all data from an SKB for DMA and create descriptors on the queue.
 */
static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
                           unsigned int segment_count)
{
        struct efx_nic *efx = tx_queue->efx;
        struct device *dma_dev = &efx->pci_dev->dev;
        unsigned int frag_index, nr_frags;
        dma_addr_t dma_addr, unmap_addr;
        unsigned short dma_flags;
        size_t len, unmap_len;

        nr_frags = skb_shinfo(skb)->nr_frags;
        frag_index = 0;

        /* Map header data. */
        len = skb_headlen(skb);
        dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
        dma_flags = EFX_TX_BUF_MAP_SINGLE;
        unmap_len = len;
        unmap_addr = dma_addr;

        if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
                return -EIO;

        if (segment_count) {
                /* For TSO we need to put the header into a separate
                 * descriptor. Map this separately if necessary.
                 */
                size_t header_len = skb_transport_header(skb) - skb->data +
                                (tcp_hdr(skb)->doff << 2u);

                if (header_len != len) {
                        tx_queue->tso_long_headers++;
                        efx_tx_map_chunk(tx_queue, dma_addr, header_len);
                        len -= header_len;
                        dma_addr += header_len;
                }
        }

        /* Add descriptors for each fragment. */
        do {
                struct efx_tx_buffer *buffer;
                skb_frag_t *fragment;

                buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

                /* The final descriptor for a fragment is responsible for
                 * unmapping the whole fragment.
                 */
                buffer->flags = EFX_TX_BUF_CONT | dma_flags;
                buffer->unmap_len = unmap_len;
                buffer->dma_offset = buffer->dma_addr - unmap_addr;

                if (frag_index >= nr_frags) {
                        /* Store SKB details with the final buffer for
                         * the completion.
                         */
                        buffer->skb = skb;
                        buffer->flags = EFX_TX_BUF_SKB | dma_flags;
                        return 0;
                }

                /* Move on to the next fragment. */
                fragment = &skb_shinfo(skb)->frags[frag_index++];
                len = skb_frag_size(fragment);
                dma_addr = skb_frag_dma_map(dma_dev, fragment,
                                            0, len, DMA_TO_DEVICE);
                dma_flags = 0;
                unmap_len = len;
                unmap_addr = dma_addr;

                if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
                        return -EIO;
        } while (1);
}

/* Remove buffers put into a tx_queue; none of the buffers may have
 * an skb attached.
 */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;
        unsigned int bytes_compl = 0;
        unsigned int pkts_compl = 0;

        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                --tx_queue->insert_count;
                buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
        }
}

/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb)
{
        struct sk_buff *segments, *next;

        segments = skb_gso_segment(skb, 0);
        if (IS_ERR(segments))
                return PTR_ERR(segments);

        dev_kfree_skb_any(skb);
        skb = segments;

        while (skb) {
                next = skb->next;
                skb->next = NULL;

                if (next)
                        skb->xmit_more = true;
                efx_enqueue_skb(tx_queue, skb);
                skb = next;
        }

        return 0;
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped
 * and the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
        bool data_mapped = false;
        unsigned int segments;
        unsigned int skb_len;
        int rc;

        skb_len = skb->len;
        segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
        if (segments == 1)
                segments = 0; /* Don't use TSO for a single segment. */

        /* Handle TSO first - it's *possible* (although unlikely) that we might
         * be passed a packet to segment that's smaller than the copybreak/PIO
         * size limit.
         */
        if (segments) {
                EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_tso);
                rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);
                if (rc == -EINVAL) {
                        rc = efx_tx_tso_fallback(tx_queue, skb);
                        tx_queue->tso_fallbacks++;
                        if (rc == 0)
                                return 0;
                }
                if (rc)
                        goto err;
#ifdef EFX_USE_PIO
        } else if (skb_len <= efx_piobuf_size && !skb->xmit_more &&
                   efx_nic_may_tx_pio(tx_queue)) {
                /* Use PIO for short packets with an empty queue. */
                if (efx_enqueue_skb_pio(tx_queue, skb))
                        goto err;
                tx_queue->pio_packets++;
                data_mapped = true;
#endif
        } else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
                /* Pad short packets or coalesce short fragmented packets. */
                if (efx_enqueue_skb_copy(tx_queue, skb))
                        goto err;
                tx_queue->cb_packets++;
                data_mapped = true;
        }

        /* Map for DMA and create descriptors if we haven't done so already. */
        if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
                goto err;

        /* Update BQL */
        netdev_tx_sent_queue(tx_queue->core_txq, skb_len);

        /* Pass off to hardware */
        if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
                struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

                /* There could be packets left on the partner queue if those
                 * SKBs had skb->xmit_more set. If we do not push those they
                 * could be left for a long time and cause a netdev watchdog.
                 */
                if (txq2->xmit_more_available)
                        efx_nic_push_buffers(txq2);

                efx_nic_push_buffers(tx_queue);
        } else {
                tx_queue->xmit_more_available = skb->xmit_more;
        }

        if (segments) {
                tx_queue->tso_bursts++;
                tx_queue->tso_packets += segments;
                tx_queue->tx_packets += segments;
        } else {
                tx_queue->tx_packets++;
        }

        efx_tx_maybe_stop_queue(tx_queue);

        return NETDEV_TX_OK;


err:
        efx_enqueue_unwind(tx_queue);
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                                unsigned int index,
                                unsigned int *pkts_compl,
                                unsigned int *bytes_compl)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int stop_index, read_ptr;

        stop_index = (index + 1) & tx_queue->ptr_mask;
        read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

        while (read_ptr != stop_index) {
                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

                if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
                    unlikely(buffer->len == 0)) {
                        netif_err(efx, tx_err, efx->net_dev,
                                  "TX queue %d spurious TX completion id %x\n",
                                  tx_queue->queue, read_ptr);
                        efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
                        return;
                }

                efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

                ++tx_queue->read_count;
                read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
        }
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_tx_queue *tx_queue;
        unsigned index, type;

        EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

        /* PTP "event" packet */
        if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
            unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
                return efx_ptp_tx(efx, skb);
        }

        index = skb_get_queue_mapping(skb);
        type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
        if (index >= efx->n_tx_channels) {
                index -= efx->n_tx_channels;
                type |= EFX_TXQ_TYPE_HIGHPRI;
        }
        tx_queue = efx_get_tx_queue(efx, index, type);

        return efx_enqueue_skb(tx_queue, skb);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;

        /* Must be inverse of queue lookup in efx_hard_start_xmit() */
        tx_queue->core_txq =
                netdev_get_tx_queue(efx->net_dev,
                                    tx_queue->queue / EFX_TXQ_TYPES +
                                    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
                                     efx->n_tx_channels : 0));
}

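/* mqprio offload: map traffic classes onto the high-priority TX queue
 * set.  Each class is given efx->n_tx_channels queues, so class N uses
 * the core queue range [N * n_tx_channels, (N + 1) * n_tx_channels).
 */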
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
                 void *type_data)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct tc_mqprio_qopt *mqprio = type_data;
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
        unsigned tc, num_tc;
        int rc;

        if (type != TC_SETUP_QDISC_MQPRIO)
                return -EOPNOTSUPP;

        num_tc = mqprio->num_tc;

        if (num_tc > EFX_MAX_TX_TC)
                return -EINVAL;

        mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

        if (num_tc == net_dev->num_tc)
                return 0;

        for (tc = 0; tc < num_tc; tc++) {
                net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
                net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
        }

        if (num_tc > net_dev->num_tc) {
                /* Initialise high-priority queues as necessary */
                efx_for_each_channel(channel, efx) {
                        efx_for_each_possible_channel_tx_queue(tx_queue,
                                                               channel) {
                                if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
                                        continue;
                                if (!tx_queue->buffer) {
                                        rc = efx_probe_tx_queue(tx_queue);
                                        if (rc)
                                                return rc;
                                }
                                if (!tx_queue->initialised)
                                        efx_init_tx_queue(tx_queue);
                                efx_init_tx_queue_core_txq(tx_queue);
                        }
                }
        } else {
                /* Reduce number of classes before number of queues */
                net_dev->num_tc = num_tc;
        }

        rc = netif_set_real_num_tx_queues(net_dev,
                                          max_t(int, num_tc, 1) *
                                          efx->n_tx_channels);
        if (rc)
                return rc;

        /* Do not destroy high-priority queues when they become
         * unused.  We would have to flush them first, and it is
         * fairly difficult to flush a subset of TX queues.  Leave
         * it to efx_fini_channels().
         */

        net_dev->num_tc = num_tc;
        return 0;
}

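/* Completion path: called from event handling with the index of the
 * latest completed descriptor.  Buffers up to and including that index
 * are freed, and the core TX queue is woken again once the fill level
 * of both partner queues falls to or below efx->txq_wake_thresh.
 */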
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
        unsigned fill_level;
        struct efx_nic *efx = tx_queue->efx;
        struct efx_tx_queue *txq2;
        unsigned int pkts_compl = 0, bytes_compl = 0;

        EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

        efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
        tx_queue->pkts_compl += pkts_compl;
        tx_queue->bytes_compl += bytes_compl;

        if (pkts_compl > 1)
                ++tx_queue->merge_events;

        /* See if we need to restart the netif queue.  This memory
         * barrier ensures that we write read_count (inside
         * efx_dequeue_buffers()) before reading the queue status.
         */
        smp_mb();
        if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
            likely(efx->port_enabled) &&
            likely(netif_device_present(efx->net_dev))) {
                txq2 = efx_tx_queue_partner(tx_queue);
                fill_level = max(tx_queue->insert_count - tx_queue->read_count,
                                 txq2->insert_count - txq2->read_count);
                if (fill_level <= efx->txq_wake_thresh)
                        netif_tx_wake_queue(tx_queue->core_txq);
        }

        /* Check whether the hardware queue is now empty */
        if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
                tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
                if (tx_queue->read_count == tx_queue->old_write_count) {
                        smp_mb();
                        tx_queue->empty_read_count =
                                tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
                }
        }
}

static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
        return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EFX_TX_CB_ORDER);
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        tx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating TX queue %d size %#x mask %#x\n",
                  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

        /* Allocate software ring */
        tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
                                   GFP_KERNEL);
        if (!tx_queue->buffer)
                return -ENOMEM;

        tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
                                    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
        if (!tx_queue->cb_page) {
                rc = -ENOMEM;
                goto fail1;
        }

        /* Allocate hardware ring */
        rc = efx_nic_probe_tx(tx_queue);
        if (rc)
                goto fail2;

        return 0;

fail2:
        kfree(tx_queue->cb_page);
        tx_queue->cb_page = NULL;
fail1:
        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
        return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;

        netif_dbg(efx, drv, efx->net_dev,
                  "initialising TX queue %d\n", tx_queue->queue);

        tx_queue->insert_count = 0;
        tx_queue->write_count = 0;
        tx_queue->packet_write_count = 0;
        tx_queue->old_write_count = 0;
        tx_queue->read_count = 0;
        tx_queue->old_read_count = 0;
        tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
        tx_queue->xmit_more_available = false;

        /* Set up default function pointers. These may get replaced by
         * efx_nic_init_tx() based off NIC/queue capabilities.
         */
        tx_queue->handle_tso = efx_enqueue_skb_tso;

        /* Set up TX descriptor ring */
        efx_nic_init_tx(tx_queue);

        tx_queue->initialised = true;
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "shutting down TX queue %d\n", tx_queue->queue);

        if (!tx_queue->buffer)
                return;

        /* Free any buffers left in the ring */
        while (tx_queue->read_count != tx_queue->write_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;
                buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

                ++tx_queue->read_count;
        }
        tx_queue->xmit_more_available = false;
        netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
        int i;

        if (!tx_queue->buffer)
                return;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "destroying TX queue %d\n", tx_queue->queue);
        efx_nic_remove_tx(tx_queue);

        if (tx_queue->cb_page) {
                for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
                        efx_nic_free_buffer(tx_queue->efx,
                                            &tx_queue->cb_page[i]);
                kfree(tx_queue->cb_page);
                tx_queue->cb_page = NULL;
        }

        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
}