/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "workarounds.h"
#include "ef10_regs.h"

#ifdef EFX_USE_PIO

#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;

#endif /* EFX_USE_PIO */
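
/* Illustrative note on the copy buffers used below: each TX ring entry
 * owns a small slice carved out of shared DMA-coherent pages.  With 4KiB
 * pages and, say, EFX_TX_CB_ORDER == 7 (128-byte slices), one page backs
 * 32 consecutive ring entries; the shifts in efx_tx_get_copy_buffer()
 * recover the page and the offset within it from the ring index.
 * (Example numbers only - the real constants live in net_driver.h.)
 */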
static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
                                         struct efx_tx_buffer *buffer)
{
        unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
        struct efx_buffer *page_buf =
                &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
        unsigned int offset =
                ((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

        if (unlikely(!page_buf->addr) &&
            efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
                                 GFP_ATOMIC))
                return NULL;
        buffer->dma_addr = page_buf->dma_addr + offset;
        buffer->unmap_len = 0;
        return (u8 *)page_buf->addr + offset;
}

u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
                                   struct efx_tx_buffer *buffer, size_t len)
{
        if (len > EFX_TX_CB_SIZE)
                return NULL;

        return efx_tx_get_copy_buffer(tx_queue, buffer);
}

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                               struct efx_tx_buffer *buffer,
                               unsigned int *pkts_compl,
                               unsigned int *bytes_compl)
{
        if (buffer->unmap_len) {
                struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
                dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

                if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
                                       DMA_TO_DEVICE);
                buffer->unmap_len = 0;
        }

        if (buffer->flags & EFX_TX_BUF_SKB) {
                EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
                (*pkts_compl)++;
                (*bytes_compl) += buffer->skb->len;
                dev_consume_skb_any((struct sk_buff *)buffer->skb);
                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
                           "TX queue %d transmission id %x complete\n",
                           tx_queue->queue, tx_queue->read_count);
        }

        buffer->len = 0;
        buffer->flags = 0;
}
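
/* Rough worked example of the bound computed below (values are typical,
 * not guaranteed): with EFX_TSO_MAX_SEGS == 100 and MAX_SKB_FRAGS == 17,
 * the baseline is 100 * 2 + 17 == 217 descriptors, plus up to 100 option
 * descriptors on EF10, plus extra descriptors if PCIe pages are smaller
 * than system pages.
 */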
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
        /* Header and payload descriptor for each output segment, plus
         * one for every input fragment boundary within a segment
         */
        unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

        /* Possibly one more per segment for option descriptors */
        if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
                max_descs += EFX_TSO_MAX_SEGS;

        /* Possibly more for PCIe page boundaries within input fragments */
        if (PAGE_SIZE > EFX_PAGE_SIZE)
                max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
                                   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

        return max_descs;
}
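
/* Note on the arithmetic below: insert_count and old_read_count are
 * free-running unsigned counters that are never masked, so the difference
 * "insert_count - old_read_count" yields the fill level correctly even
 * after either counter wraps around.
 */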
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
        /* We need to consider both queues that the net core sees as one */
        struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
        struct efx_nic *efx = txq1->efx;
        unsigned int fill_level;

        fill_level = max(txq1->insert_count - txq1->old_read_count,
                         txq2->insert_count - txq2->old_read_count);
        if (likely(fill_level < efx->txq_stop_thresh))
                return;

        /* We used the stale old_read_count above, which gives us a
         * pessimistic estimate of the fill level (which may even
         * validly be >= efx->txq_entries).  Now try again using
         * read_count (more likely to be a cache miss).
         *
         * If we read read_count and then conditionally stop the
         * queue, it is possible for the completion path to race with
         * us and complete all outstanding descriptors in the middle,
         * after which there will be no more completions to wake it.
         * Therefore we stop the queue first, then read read_count
         * (with a memory barrier to ensure the ordering), then
         * restart the queue if the fill level turns out to be low
         * enough.
         */
        netif_tx_stop_queue(txq1->core_txq);
        smp_mb();
        txq1->old_read_count = READ_ONCE(txq1->read_count);
        txq2->old_read_count = READ_ONCE(txq2->read_count);

        fill_level = max(txq1->insert_count - txq1->old_read_count,
                         txq2->insert_count - txq2->old_read_count);
        EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
        if (likely(fill_level < efx->txq_stop_thresh)) {
                smp_mb();
                if (likely(!efx->loopback_selftest))
                        netif_tx_start_queue(txq1->core_txq);
        }
}

static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
                                struct sk_buff *skb)
{
        unsigned int copy_len = skb->len;
        struct efx_tx_buffer *buffer;
        u8 *copy_buffer;
        int rc;

        EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);

        buffer = efx_tx_queue_get_insert_buffer(tx_queue);

        copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
        if (unlikely(!copy_buffer))
                return -ENOMEM;

        rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
        EFX_WARN_ON_PARANOID(rc);
        buffer->len = copy_len;

        buffer->skb = skb;
        buffer->flags = EFX_TX_BUF_SKB;

        ++tx_queue->insert_count;
        return rc;
}

#ifdef EFX_USE_PIO

struct efx_short_copy_buffer {
        int used;
        u8 buf[L1_CACHE_BYTES];
};

/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
                                    u8 *data, int len,
                                    struct efx_short_copy_buffer *copy_buf)
{
        int block_len = len & ~(sizeof(copy_buf->buf) - 1);

        __iowrite64_copy(*piobuf, data, block_len >> 3);
        *piobuf += block_len;
        len -= block_len;

        if (len) {
                data += block_len;
                BUG_ON(copy_buf->used);
                BUG_ON(len > sizeof(copy_buf->buf));
                memcpy(copy_buf->buf, data, len);
                copy_buf->used = len;
        }
}
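
/* Worked example of the masking above, assuming L1_CACHE_BYTES == 64:
 * len == 200 gives block_len == (200 & ~63) == 192, so 192 bytes (24
 * qwords, hence the ">> 3") go straight to the PIO aperture and the
 * trailing 8 bytes wait in the copy buffer for the next call.
 */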
/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
                                       u8 *data, int len,
                                       struct efx_short_copy_buffer *copy_buf)
{
        if (copy_buf->used) {
                /* if the copy buffer is partially full, fill it up and write */
                int copy_to_buf =
                        min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);

                memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
                copy_buf->used += copy_to_buf;

                /* if we didn't fill it up then we're done for now */
                if (copy_buf->used < sizeof(copy_buf->buf))
                        return;

                __iowrite64_copy(*piobuf, copy_buf->buf,
                                 sizeof(copy_buf->buf) >> 3);
                *piobuf += sizeof(copy_buf->buf);
                data += copy_to_buf;
                len -= copy_to_buf;
                copy_buf->used = 0;
        }

        efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
}

static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
                                  struct efx_short_copy_buffer *copy_buf)
{
        /* if there's anything in it, write the whole buffer, including junk */
        if (copy_buf->used)
                __iowrite64_copy(piobuf, copy_buf->buf,
                                 sizeof(copy_buf->buf) >> 3);
}

/* Traverse skb structure and copy fragments in to PIO buffer.
 * Advances piobuf pointer.
 */
static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
                                     u8 __iomem **piobuf,
                                     struct efx_short_copy_buffer *copy_buf)
{
        int i;

        efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
                                copy_buf);

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
                u8 *vaddr;

                vaddr = kmap_atomic(skb_frag_page(f));

                efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
                                           skb_frag_size(f), copy_buf);
                kunmap_atomic(vaddr);
        }

        EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
}
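
/* The kmap_atomic()/kunmap_atomic() pair above is what makes fragment
 * pages in highmem safe to read on 32-bit kernels (and is why
 * linux/highmem.h is included); on configurations without highmem it
 * reduces to page_address() with preemption disabled.
 */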
static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb)
{
        struct efx_tx_buffer *buffer =
                efx_tx_queue_get_insert_buffer(tx_queue);
        u8 __iomem *piobuf = tx_queue->piobuf;

        /* Copy to PIO buffer. Ensure the writes are padded to the end
         * of a cache line, as this is required for write-combining to be
         * effective on at least x86.
         */

        if (skb_shinfo(skb)->nr_frags) {
                /* The size of the copy buffer will ensure all writes
                 * are the size of a cache line.
                 */
                struct efx_short_copy_buffer copy_buf;

                copy_buf.used = 0;

                efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
                                         &piobuf, &copy_buf);
                efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
        } else {
                /* Pad the write to the size of a cache line.
                 * We can do this because we know the skb_shared_info struct is
                 * after the source, and the destination buffer is big enough.
                 */
                BUILD_BUG_ON(L1_CACHE_BYTES >
                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
                __iowrite64_copy(tx_queue->piobuf, skb->data,
                                 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
        }

        buffer->skb = skb;
        buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;

        EFX_POPULATE_QWORD_5(buffer->option,
                             ESF_DZ_TX_DESC_IS_OPT, 1,
                             ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
                             ESF_DZ_TX_PIO_CONT, 0,
                             ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
                             ESF_DZ_TX_PIO_BUF_ADDR,
                             tx_queue->piobuf_offset);
        ++tx_queue->insert_count;
        return 0;
}
#endif /* EFX_USE_PIO */
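
/* Sketch of the chunking done by efx_tx_map_chunk() below (the limit
 * varies by NIC): if tx_limit_len() capped descriptors at 16KiB, a single
 * 20000-byte DMA run would be emitted as one 16384-byte descriptor
 * followed by one 3616-byte descriptor, each flagged EFX_TX_BUF_CONT.
 */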
static struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
                                              dma_addr_t dma_addr,
                                              size_t len)
{
        const struct efx_nic_type *nic_type = tx_queue->efx->type;
        struct efx_tx_buffer *buffer;
        unsigned int dma_len;

        /* Map the fragment taking account of NIC-dependent DMA limits. */
        do {
                buffer = efx_tx_queue_get_insert_buffer(tx_queue);
                dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);

                buffer->len = dma_len;
                buffer->dma_addr = dma_addr;
                buffer->flags = EFX_TX_BUF_CONT;
                len -= dma_len;
                dma_addr += dma_len;
                ++tx_queue->insert_count;
        } while (len);

        return buffer;
}

/* Map all data from an SKB for DMA and create descriptors on the queue.
 */
static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
                           unsigned int segment_count)
{
        struct efx_nic *efx = tx_queue->efx;
        struct device *dma_dev = &efx->pci_dev->dev;
        unsigned int frag_index, nr_frags;
        dma_addr_t dma_addr, unmap_addr;
        unsigned short dma_flags;
        size_t len, unmap_len;

        nr_frags = skb_shinfo(skb)->nr_frags;
        frag_index = 0;

        /* Map header data. */
        len = skb_headlen(skb);
        dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
        dma_flags = EFX_TX_BUF_MAP_SINGLE;
        unmap_len = len;
        unmap_addr = dma_addr;

        if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
                return -EIO;

        if (segment_count) {
                /* For TSO we need to put the header in to a separate
                 * descriptor. Map this separately if necessary.
                 */
                size_t header_len = skb_transport_header(skb) - skb->data +
                                (tcp_hdr(skb)->doff << 2u);

                if (header_len != len) {
                        tx_queue->tso_long_headers++;
                        efx_tx_map_chunk(tx_queue, dma_addr, header_len);
                        len -= header_len;
                        dma_addr += header_len;
                }
        }

        /* Add descriptors for each fragment. */
        do {
                struct efx_tx_buffer *buffer;
                skb_frag_t *fragment;

                buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

                /* The final descriptor for a fragment is responsible for
                 * unmapping the whole fragment.
                 */
                buffer->flags = EFX_TX_BUF_CONT | dma_flags;
                buffer->unmap_len = unmap_len;
                buffer->dma_offset = buffer->dma_addr - unmap_addr;

                if (frag_index >= nr_frags) {
                        /* Store SKB details with the final buffer for
                         * the completion.
                         */
                        buffer->skb = skb;
                        buffer->flags = EFX_TX_BUF_SKB | dma_flags;
                        return 0;
                }

                /* Move on to the next fragment. */
                fragment = &skb_shinfo(skb)->frags[frag_index++];
                len = skb_frag_size(fragment);
                dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
                                            DMA_TO_DEVICE);
                dma_flags = 0;
                unmap_len = len;
                unmap_addr = dma_addr;

                if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
                        return -EIO;
        } while (1);
}

/* Remove buffers put into a tx_queue. None of the buffers must have
 * an skb attached.
 */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;
        unsigned int bytes_compl = 0;
        unsigned int pkts_compl = 0;

        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                --tx_queue->insert_count;
                buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
        }
}

/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb)
{
        struct sk_buff *segments, *next;

        segments = skb_gso_segment(skb, 0);
        if (IS_ERR(segments))
                return PTR_ERR(segments);

        dev_kfree_skb_any(skb);
        skb = segments;

        while (skb) {
                next = skb->next;
                skb->next = NULL;

                if (next)
                        skb->xmit_more = true;
                efx_enqueue_skb(tx_queue, skb);
                skb = next;
        }

        return 0;
}
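
/* Note: setting skb->xmit_more on all but the last software segment above
 * lets the resulting descriptors share a single doorbell push, so the
 * fallback path keeps the same batching behaviour as hardware TSO.
 */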
/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
        bool data_mapped = false;
        unsigned int segments;
        unsigned int skb_len;
        int rc;

        skb_len = skb->len;
        segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
        if (segments == 1)
                segments = 0; /* Don't use TSO for a single segment. */

        /* Handle TSO first - it's *possible* (although unlikely) that we might
         * be passed a packet to segment that's smaller than the copybreak/PIO
         * size limit.
         */
        if (segments) {
                EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_tso);
                rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);
                if (rc == -EINVAL) {
                        rc = efx_tx_tso_fallback(tx_queue, skb);
                        tx_queue->tso_fallbacks++;
                        if (rc == 0)
                                return 0;
                }
                if (rc)
                        goto err;
#ifdef EFX_USE_PIO
        } else if (skb_len <= efx_piobuf_size && !skb->xmit_more &&
                   efx_nic_may_tx_pio(tx_queue)) {
                /* Use PIO for short packets with an empty queue. */
                if (efx_enqueue_skb_pio(tx_queue, skb))
                        goto err;
                tx_queue->pio_packets++;
                data_mapped = true;
#endif
        } else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
                /* Pad short packets or coalesce short fragmented packets. */
                if (efx_enqueue_skb_copy(tx_queue, skb))
                        goto err;
                tx_queue->cb_packets++;
                data_mapped = true;
        }

        /* Map for DMA and create descriptors if we haven't done so already. */
        if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
                goto err;

        /* Update BQL */
        netdev_tx_sent_queue(tx_queue->core_txq, skb_len);

        /* Pass off to hardware */
        if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
                struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

                /* There could be packets left on the partner queue if those
                 * SKBs had skb->xmit_more set. If we do not push those they
                 * could be left for a long time and cause a netdev watchdog.
                 */
                if (txq2->xmit_more_available)
                        efx_nic_push_buffers(txq2);

                efx_nic_push_buffers(tx_queue);
        } else {
                tx_queue->xmit_more_available = skb->xmit_more;
        }

        if (segments) {
                tx_queue->tso_bursts++;
                tx_queue->tso_packets += segments;
                tx_queue->tx_packets += segments;
        } else {
                tx_queue->tx_packets++;
        }

        efx_tx_maybe_stop_queue(tx_queue);

        return NETDEV_TX_OK;

err:
        efx_enqueue_unwind(tx_queue);
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                                unsigned int index,
                                unsigned int *pkts_compl,
                                unsigned int *bytes_compl)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int stop_index, read_ptr;

        stop_index = (index + 1) & tx_queue->ptr_mask;
        read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

        while (read_ptr != stop_index) {
                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

                if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
                    unlikely(buffer->len == 0)) {
                        netif_err(efx, tx_err, efx->net_dev,
                                  "TX queue %d spurious TX completion id %x\n",
                                  tx_queue->queue, read_ptr);
                        efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
                        return;
                }

                efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

                ++tx_queue->read_count;
                read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
        }
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_tx_queue *tx_queue;
        unsigned index, type;

        EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

        /* PTP "event" packet */
        if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
            unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
                return efx_ptp_tx(efx, skb);
        }

        index = skb_get_queue_mapping(skb);
        type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
        if (index >= efx->n_tx_channels) {
                index -= efx->n_tx_channels;
                type |= EFX_TXQ_TYPE_HIGHPRI;
        }
        tx_queue = efx_get_tx_queue(efx, index, type);

        return efx_enqueue_skb(tx_queue, skb);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;

        /* Must be inverse of queue lookup in efx_hard_start_xmit() */
        tx_queue->core_txq =
                netdev_get_tx_queue(efx->net_dev,
                                    tx_queue->queue / EFX_TXQ_TYPES +
                                    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
                                     efx->n_tx_channels : 0));
}
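
/* Worked example of the two mappings (illustrative values): with
 * n_tx_channels == 4 and EFX_TXQ_TYPES == 4, hardware queue 10
 * (channel 2, EFX_TXQ_TYPE_HIGHPRI) maps to core TX queue
 * 10 / 4 + 4 == 6; efx_hard_start_xmit() inverts this, since core
 * queue 6 >= n_tx_channels it subtracts 4 and sets
 * EFX_TXQ_TYPE_HIGHPRI again for channel index 2.
 */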
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
                 void *type_data)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct tc_mqprio_qopt *mqprio = type_data;
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
        unsigned tc, num_tc;
        int rc;

        if (type != TC_SETUP_QDISC_MQPRIO)
                return -EOPNOTSUPP;

        num_tc = mqprio->num_tc;

        if (num_tc > EFX_MAX_TX_TC)
                return -EINVAL;

        mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

        if (num_tc == net_dev->num_tc)
                return 0;

        for (tc = 0; tc < num_tc; tc++) {
                net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
                net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
        }

        if (num_tc > net_dev->num_tc) {
                /* Initialise high-priority queues as necessary */
                efx_for_each_channel(channel, efx) {
                        efx_for_each_possible_channel_tx_queue(tx_queue,
                                                               channel) {
                                if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
                                        continue;
                                if (!tx_queue->buffer) {
                                        rc = efx_probe_tx_queue(tx_queue);
                                        if (rc)
                                                return rc;
                                }
                                if (!tx_queue->initialised)
                                        efx_init_tx_queue(tx_queue);
                                efx_init_tx_queue_core_txq(tx_queue);
                        }
                }
        } else {
                /* Reduce number of classes before number of queues */
                net_dev->num_tc = num_tc;
        }

        rc = netif_set_real_num_tx_queues(net_dev,
                                          max_t(int, num_tc, 1) *
                                          efx->n_tx_channels);
        if (rc)
                return rc;

        /* Do not destroy high-priority queues when they become
         * unused.  We would have to flush them first, and it is
         * fairly difficult to flush a subset of TX queues.  Leave
         * it to efx_fini_channels().
         */

        net_dev->num_tc = num_tc;
        return 0;
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
        unsigned int fill_level;
        struct efx_nic *efx = tx_queue->efx;
        struct efx_tx_queue *txq2;
        unsigned int pkts_compl = 0, bytes_compl = 0;

        EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

        efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
        tx_queue->pkts_compl += pkts_compl;
        tx_queue->bytes_compl += bytes_compl;

        if (pkts_compl > 1)
                ++tx_queue->merge_events;

        /* See if we need to restart the netif queue.  This memory
         * barrier ensures that we write read_count (inside
         * efx_dequeue_buffers()) before reading the queue status.
         */
        smp_mb();
        if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
            likely(efx->port_enabled) &&
            likely(netif_device_present(efx->net_dev))) {
                txq2 = efx_tx_queue_partner(tx_queue);
                fill_level = max(tx_queue->insert_count - tx_queue->read_count,
                                 txq2->insert_count - txq2->read_count);
                if (fill_level <= efx->txq_wake_thresh)
                        netif_tx_wake_queue(tx_queue->core_txq);
        }

        /* Check whether the hardware queue is now empty */
        if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
                tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
                if (tx_queue->read_count == tx_queue->old_write_count) {
                        smp_mb();
                        tx_queue->empty_read_count =
                                tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
                }
        }
}
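
/* Worked example for efx_tx_cb_page_count() below, with assumed values:
 * a 1024-entry ring, 4KiB pages and EFX_TX_CB_ORDER == 7 gives
 * 4096 >> 7 == 32 copy buffers per page, so the ring needs
 * DIV_ROUND_UP(1024, 32) == 32 backing pages.
 */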
static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
        return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
                            PAGE_SIZE >> EFX_TX_CB_ORDER);
}
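
/* Ring sizing example for efx_probe_tx_queue() below: a requested
 * txq_entries of 1000 is rounded up to a 1024-entry power-of-two ring,
 * giving ptr_mask == 0x3ff; the free-running counters are reduced to
 * ring positions by masking with ptr_mask.
 */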
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        tx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating TX queue %d size %#x mask %#x\n",
                  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

        /* Allocate software ring */
        tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
                                   GFP_KERNEL);
        if (!tx_queue->buffer)
                return -ENOMEM;

        tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
                                    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
        if (!tx_queue->cb_page) {
                rc = -ENOMEM;
                goto fail1;
        }

        /* Allocate hardware ring */
        rc = efx_nic_probe_tx(tx_queue);
        if (rc)
                goto fail2;

        return 0;

fail2:
        kfree(tx_queue->cb_page);
        tx_queue->cb_page = NULL;
fail1:
        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
        return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;

        netif_dbg(efx, drv, efx->net_dev,
                  "initialising TX queue %d\n", tx_queue->queue);

        tx_queue->insert_count = 0;
        tx_queue->write_count = 0;
        tx_queue->packet_write_count = 0;
        tx_queue->old_write_count = 0;
        tx_queue->read_count = 0;
        tx_queue->old_read_count = 0;
        tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
        tx_queue->xmit_more_available = false;

        /* Set up default function pointers. These may get replaced by
         * efx_nic_init_tx() based off NIC/queue capabilities.
         */
        tx_queue->handle_tso = efx_enqueue_skb_tso;

        /* Set up TX descriptor ring */
        efx_nic_init_tx(tx_queue);

        tx_queue->initialised = true;
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "shutting down TX queue %d\n", tx_queue->queue);

        if (!tx_queue->buffer)
                return;

        /* Free any buffers left in the ring */
        while (tx_queue->read_count != tx_queue->write_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;

                buffer = &tx_queue->buffer[tx_queue->read_count &
                                           tx_queue->ptr_mask];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

                ++tx_queue->read_count;
        }
        tx_queue->xmit_more_available = false;
        netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
        int i;

        if (!tx_queue->buffer)
                return;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "destroying TX queue %d\n", tx_queue->queue);
        efx_nic_remove_tx(tx_queue);

        if (tx_queue->cb_page) {
                for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
                        efx_nic_free_buffer(tx_queue->efx,
                                            &tx_queue->cb_page[i]);
                kfree(tx_queue->cb_page);
                tx_queue->cb_page = NULL;
        }

        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
}