/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/
#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
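
/* build_ctob() packs the second quadword (QW1) of a Tx data descriptor:
 * the descriptor type in the low bits, then the command, offset, buffer
 * size and L2 tag fields at their respective shifts.  For example, the
 * dummy FDIR data descriptor programmed below is built as
 *
 *	build_ctob(I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY, 0,
 *		   I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
 *
 * which ORs the EOP/RS/DUMMY command bits and the raw-packet length into
 * one little-endian quadword.
 */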
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
#define I40E_FD_CLEAN_DELAY 10

/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
			     struct i40e_pf *pf, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	unsigned int fpt, dcc;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 delay = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	do {
		if (I40E_DESC_UNUSED(tx_ring) > 1)
			break;
		msleep_interruptible(1);
		delay++;
	} while (delay < I40E_FD_CLEAN_DELAY);

	if (!(I40E_DESC_UNUSED(tx_ring) > 1))
		return -EAGAIN;

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
	first = &tx_ring->tx_bi[i];
	memset(first, 0, sizeof(struct i40e_tx_buffer));

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	      I40E_TXD_FLTR_QW0_QINDEX_MASK;

	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK;

	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;

	/* Use LAN VSI Id if not programmed by user */
	if (fdir_data->dest_vsi == 0)
		fpt |= (pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
	else
		fpt |= ((u32)fdir_data->dest_vsi <<
			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;

	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;

	if (add)
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
	else
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
	       I40E_TXD_FLTR_QW1_DEST_MASK;

	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;

	if (fdir_data->cnt_index != 0) {
		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dcc |= ((u32)fdir_data->cnt_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* set the timestamp */
	tx_buf->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}
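
/* Note: programming one sideband filter consumes two descriptors on the
 * FDIR ring: the filter program descriptor filled in above, plus a dummy
 * data descriptor carrying the raw template packet.  This is why the
 * routine waits until at least two descriptors are free before starting.
 */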
#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42

/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
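
	/* Layout of the dummy frame above: 12 zero bytes of MAC addresses,
	 * ethertype 0x0800 (IPv4), then an IPv4 header (0x45 = version 4
	 * with a 5-word header; total length 0x1c = 28 = 20-byte IP +
	 * 8-byte UDP; TTL 0x40; protocol 0x11 = UDP) and an all-zero UDP
	 * header.  Addresses and ports are patched in below before the
	 * filter is programmed.
	 */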
	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	udp->source = fd_data->src_port;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	return err ? -EOPNOTSUPP : 0;
}
#define I40E_TCPIP_DUMMY_PACKET_LEN 54

/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};
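
	/* Same template layout as the UDPv4 case, but the IPv4 total length
	 * is 0x28 = 40 (20-byte IP header + 20-byte TCP header) and the
	 * protocol byte is 0x6 (TCP); the trailing non-zero bytes
	 * (0x80, 0x11, 0x0, 0x72) land in the TCP data-offset/flags/window
	 * fields of the template.
	 */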
	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;

	if (add) {
		pf->fd_tcp_rule++;
		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	} else {
		pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
				  (pf->fd_tcp_rule - 1) : 0;
		if (pf->fd_tcp_rule == 0) {
			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
			dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
		}
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	return err ? -EOPNOTSUPP : 0;
}
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Always returns -EOPNOTSUPP
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	return -EOPNOTSUPP;
}
#define I40E_IP_DUMMY_PACKET_LEN 34

/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4;	i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip[0];
		ip->daddr = fd_data->dst_ip[0];
		ip->protocol = 0;

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			err = true;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	return err ? -EOPNOTSUPP : 0;
}
/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: the flow director filter to add or remove
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IPV4_FLOW:
		ret = i40e_add_del_fdir_ipv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		default:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	/* The buffer allocated here is freed by i40e_clean_tx_ring() */
	return ret;
}
/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify whether the FD programming or invalidation
 * requested by SW to the HW was successful, and to take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->auto_disable_flags &
			      I40E_FLAG_FD_SB_ENABLED)) {
				dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->auto_disable_flags |=
							I40E_FLAG_FD_SB_ENABLED;
			}
		} else {
			dev_info(&pdev->dev,
				 "FD filter programming failed due to incorrect filter parameters\n");
		}
	} else if (error ==
		   (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}
/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);

		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}
/**
 * i40e_clean_tx_ring - Free all Tx ring buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->queue_index));
}
/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}
/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring: tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}
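
/* The head write-back slot read above is the extra u32 that
 * i40e_setup_tx_descriptors() allocates immediately past the last
 * descriptor (tx_ring->desc + tx_ring->count).  Hardware DMA-writes the
 * index of the most recently completed descriptor there, so the driver
 * learns the ring head from host memory instead of an MMIO register read.
 */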
/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @tx_ring: the ring of descriptors
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
static u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
	u32 head, tail;

	head = i40e_get_head(ring);
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
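
/* Example of the wrap-around arithmetic above: with a 512-entry ring,
 * head == 500 and tail == 10 gives 10 + 512 - 500 = 22 descriptors still
 * owned by hardware, while head == 10 and tail == 500 gives 500 - 10 = 490.
 */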
/**
 * i40e_check_tx_hang - Is there a hang in the Tx queue
 * @tx_ring: the ring of descriptors
 **/
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
	u32 tx_done = tx_ring->stats.packets;
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = i40e_get_tx_pending(tx_ring);
	struct i40e_pf *pf = tx_ring->vsi->back;
	bool ret = false;

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * PFC clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
				       &tx_ring->state);
	} else if (tx_done_old == tx_done &&
		   (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
		if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
			dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
				 tx_pending, tx_ring->queue_index);
		pf->tx_sluggish_count++;
	} else {
		/* update completed stats and disarm the hang check */
		tx_ring->tx_stats.tx_done_old = tx_done;
		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
	}

	return ret;
}
#define WB_STRIDE 0x3
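
/* WB_STRIDE is used as a mask: (i & WB_STRIDE) == WB_STRIDE is true only
 * just below each multiple of four (indexes 3, 7, 11, ...).  When cleaning
 * stops anywhere else, completed descriptors may still be waiting to be
 * written back by hardware, which is what the arm_wb path below forces out.
 */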
/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring:  tx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_packets = 0;
	unsigned int total_bytes = 0;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		dev_consume_skb_any(tx_buf->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	/* check to see if there are any non-cache aligned descriptors
	 * waiting to be written back, and kick the hardware to force
	 * them to be written back in case of napi polling
	 */
	if (budget &&
	    !((i & WB_STRIDE) == WB_STRIDE) &&
	    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
	    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
		tx_ring->arm_wb = true;
	else
		tx_ring->arm_wb = false;

	if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
			 "  VSI                  <%d>\n"
			 "  Tx Queue             <%d>\n"
			 "  next_to_use          <%x>\n"
			 "  next_to_clean        <%x>\n",
			 tx_ring->vsi->seid,
			 tx_ring->queue_index,
			 tx_ring->next_to_use, i);
		dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
			 "  time_stamp           <%lx>\n"
			 "  jiffies              <%lx>\n",
			 tx_ring->tx_bi[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		dev_info(tx_ring->dev,
			 "tx hang detected on queue %d, reset requested\n",
			 tx_ring->queue_index);

		/* do not fire the reset immediately, wait for the stack to
		 * decide we are truly stuck, also prevents every queue from
		 * simultaneously requesting a reset
		 */

		/* the adapter is about to reset, no point in enabling polling */
		budget = 1;
	}

	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
						      tx_ring->queue_index),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
/**
 * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 **/
static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
		  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
		  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
		  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
		  /* allow 00 to be written to the index */

	wr32(&vsi->back->hw,
	     I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
	     val);
}
/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	u32 new_itr = rc->itr;
	int bytes_per_int;

	if (rc->total_packets == 0 || !rc->itr)
		return;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (100000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (8000 ints/s)
	 */
	bytes_per_int = rc->total_bytes / rc->itr;
	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
		if (bytes_per_int <= 20)
			rc->latency_range = I40E_LOW_LATENCY;
		break;
	}

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_100K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	if (new_itr != rc->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * rc->itr) /
			  ((9 * new_itr) + rc->itr);
		rc->itr = new_itr & I40E_MAX_ITR;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;
}
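
/* Worked example of the smoothing step above: if rc->itr is 62 and the
 * switch picks new_itr = 25, then (10 * 25 * 62) / ((9 * 25) + 62) =
 * 15500 / 287 = 54, so the stored ITR moves only part of the way toward
 * the target on each interrupt rather than jumping straight to it.
 */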
/**
 * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
 * @q_vector: the vector to adjust
 **/
static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
{
	u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
	struct i40e_hw *hw = &q_vector->vsi->back->hw;
	u32 reg_addr;
	u16 old_itr;

	reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
	old_itr = q_vector->rx.itr;
	i40e_set_new_dynamic_itr(&q_vector->rx);
	if (old_itr != q_vector->rx.itr)
		wr32(hw, reg_addr, q_vector->rx.itr);

	reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
	old_itr = q_vector->tx.itr;
	i40e_set_new_dynamic_itr(&q_vector->tx);
	if (old_itr != q_vector->tx.itr)
		wr32(hw, reg_addr, q_vector->tx.itr);
}
/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	u64 qw;
	u8 id;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
	     I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
#ifdef I40E_FCOE
	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
#endif
}
/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}
/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (ring_is_ps_enabled(rx_ring)) {
		int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;

		rx_bi = &rx_ring->rx_bi[0];
		if (rx_bi->hdr_buf) {
			dma_free_coherent(dev,
					  bufsz,
					  rx_bi->hdr_buf,
					  rx_bi->dma);
			for (i = 0; i < rx_ring->count; i++) {
				rx_bi = &rx_ring->rx_bi[i];
				rx_bi->dma = 0;
				rx_bi->hdr_buf = NULL;
			}
		}
	}
	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		if (rx_bi->dma) {
			dma_unmap_single(dev,
					 rx_bi->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_bi->dma = 0;
		}
		if (rx_bi->skb) {
			dev_kfree_skb(rx_bi->skb);
			rx_bi->skb = NULL;
		}
		if (rx_bi->page) {
			if (rx_bi->page_dma) {
				dma_unmap_page(dev,
					       rx_bi->page_dma,
					       PAGE_SIZE / 2,
					       DMA_FROM_DEVICE);
				rx_bi->page_dma = 0;
			}
			__free_page(rx_bi->page);
			rx_bi->page = NULL;
			rx_bi->page_offset = 0;
		}
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}
/**
 * i40e_alloc_rx_headers - allocate rx header buffers
 * @rx_ring: ring to alloc buffers
 *
 * Allocate rx header buffers for the entire ring. As these are static,
 * this is only called when setting up a new ring.
 **/
void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	dma_addr_t dma;
	void *buffer;
	int buf_size;
	int i;

	if (rx_ring->rx_bi[0].hdr_buf)
		return;
	/* Make sure the buffers don't cross cache line boundaries. */
	buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
	buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
				    &dma, GFP_KERNEL);
	if (!buffer)
		return;
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		rx_bi->dma = dma + (i * buf_size);
		rx_bi->hdr_buf = buffer + (i * buf_size);
	}
}
/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
/**
 * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];

		if (bi->skb) /* desc is in use */
			goto no_buffers;
		if (!bi->page) {
			bi->page = alloc_page(GFP_ATOMIC);
			if (!bi->page) {
				rx_ring->rx_stats.alloc_page_failed++;
				goto no_buffers;
			}
		}

		if (!bi->page_dma) {
			/* use a half page if we're re-using */
			bi->page_offset ^= PAGE_SIZE / 2;
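			/* the XOR above flips between the two halves of the
			 * page, so hardware is handed one half while the
			 * stack may still hold a reference to the other
			 */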
			bi->page_dma = dma_map_page(rx_ring->dev,
						    bi->page,
						    bi->page_offset,
						    PAGE_SIZE / 2,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev,
					      bi->page_dma)) {
				rx_ring->rx_stats.alloc_page_failed++;
				bi->page_dma = 0;
				goto no_buffers;
			}
		}

		dma_sync_single_range_for_device(rx_ring->dev,
						 bi->dma,
						 0,
						 rx_ring->rx_hdr_len,
						 DMA_FROM_DEVICE);
		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);
}
/**
 * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	struct sk_buff *skb;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];
		skb = bi->skb;

		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_buff_failed++;
				goto no_buffers;
			}
			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			bi->skb = skb;
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(rx_ring->dev,
						 skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				rx_ring->rx_stats.alloc_buff_failed++;
				bi->dma = 0;
				goto no_buffers;
			}
		}

		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		rx_desc->read.hdr_addr = 0;
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);
}
/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring:  rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;
	struct i40e_vsi *vsi = rx_ring->vsi;
	u64 flags = vsi->back->flags;

	if (vlan_tag & VLAN_VID_MASK)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	if (flags & I40E_FLAG_IN_NETPOLL)
		netif_rx(skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}
/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_status: status value of last descriptor in packet
 * @rx_error: error value of last descriptor in packet
 * @rx_ptype: ptype value of last descriptor in packet
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    u32 rx_status,
				    u32 rx_error,
				    u16 rx_ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
	bool ipv4 = false, ipv6 = false;
	bool ipv4_tunnel, ipv6_tunnel;
	__wsum rx_udp_csum;
	struct iphdr *iph;
	__sum16 csum;

	ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
		      (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
	ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
		      (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);

	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
		ipv4 = true;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
		ipv6 = true;

	if (ipv4 &&
	    (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
	 * it in the driver, hardware does not do it for us.
	 * Since L3L4P bit was set we assume a valid IHL value (>=5)
	 * so the total length of IPv4 header is IHL*4 bytes
	 * The UDP_0 bit *may* be set if the *inner* header is UDP
	 */
	if (ipv4_tunnel) {
		skb->transport_header = skb->mac_header +
					sizeof(struct ethhdr) +
					(ip_hdr(skb)->ihl * 4);

		/* Add 4 bytes for VLAN tagged packets */
		skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
					  skb->protocol == htons(ETH_P_8021AD))
					  ? VLAN_HLEN : 0;

		if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
		    (udp_hdr(skb)->check != 0)) {
			rx_udp_csum = udp_csum(skb);
			iph = ip_hdr(skb);
			csum = csum_tcpudp_magic(
					iph->saddr, iph->daddr,
					(skb->len - skb_transport_offset(skb)),
					IPPROTO_UDP, rx_udp_csum);

			if (udp_hdr(skb)->check != csum)
				goto checksum_fail;

		} /* else its GRE and so no outer UDP header */
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->csum_level = ipv4_tunnel || ipv6_tunnel;

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}
/**
 * i40e_rx_hash - returns the hash value from the Rx descriptor
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 **/
static inline u32 i40e_rx_hash(struct i40e_ring *ring,
			       union i40e_rx_desc *rx_desc)
{
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if ((ring->netdev->features & NETIF_F_RXHASH) &&
	    (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
		return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
	else
		return 0;
}
/**
 * i40e_ptype_to_hash - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}
/**
 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
 * @rx_ring:  rx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Returns the number of packets cleaned
 **/
static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	const int current_node = numa_node_id();
	struct i40e_vsi *vsi = rx_ring->vsi;
	u16 i = rx_ring->next_to_clean;
	union i40e_rx_desc *rx_desc;
	u32 rx_error, rx_status;
	u8 rx_ptype;
	u64 qword;

	if (budget <= 0)
		return 0;

	do {
		struct i40e_rx_buffer *rx_bi;
		struct sk_buff *skb;
		u16 vlan_tag;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		i = rx_ring->next_to_clean;
		rx_desc = I40E_RX_DESC(rx_ring, i);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			    I40E_RXD_QW1_STATUS_SHIFT;

		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();
		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring, rx_desc);
			I40E_RX_INCREMENT(rx_ring, i);
			continue;
		}
		rx_bi = &rx_ring->rx_bi[i];
		skb = rx_bi->skb;
		if (likely(!skb)) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_hdr_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_buff_failed++;
				break;
			}

			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			/* we are reusing so sync this buffer for CPU use */
			dma_sync_single_range_for_cpu(rx_ring->dev,
						      rx_bi->dma,
						      0,
						      rx_ring->rx_hdr_len,
						      DMA_FROM_DEVICE);
		}
		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
			 I40E_RXD_QW1_LENGTH_SPH_SHIFT;

		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
			   I40E_RXD_QW1_ERROR_SHIFT;
		rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);

		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;
		prefetch(rx_bi->page);
		rx_bi->skb = NULL;
		cleaned_count++;
		if (rx_hbo || rx_sph) {
			int len;

			if (rx_hbo)
				len = I40E_RX_HDR_SIZE;
			else
				len = rx_header_len;
			memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
		} else if (skb->len == 0) {
			int len;

			len = (rx_packet_len > skb_headlen(skb) ?
				skb_headlen(skb) : rx_packet_len);
			memcpy(__skb_put(skb, len),
			       rx_bi->page + rx_bi->page_offset,
			       len);
			rx_bi->page_offset += len;
			rx_packet_len -= len;
		}

		/* Get the rest of the data if this was a header split */
		if (rx_packet_len) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_bi->page,
					   rx_bi->page_offset,
					   rx_packet_len);

			skb->len += rx_packet_len;
			skb->data_len += rx_packet_len;
			skb->truesize += rx_packet_len;

			if ((page_count(rx_bi->page) == 1) &&
			    (page_to_nid(rx_bi->page) == current_node))
				get_page(rx_bi->page);
			else
				rx_bi->page = NULL;

			dma_unmap_page(rx_ring->dev,
				       rx_bi->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			rx_bi->page_dma = 0;
		}
		I40E_RX_INCREMENT(rx_ring, i);

		if (unlikely(
		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			struct i40e_rx_buffer *next_buffer;

			next_buffer = &rx_ring->rx_bi[i];
			next_buffer->skb = skb;
			rx_ring->rx_stats.non_eop_descs++;
			continue;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);
			/* TODO: shouldn't we increment a counter indicating the
			 * drop?
			 */
			continue;
		}

		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
			     i40e_ptype_to_hash(rx_ptype));
		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
					     I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
			rx_ring->last_rx_timestamp = jiffies;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);

		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
			 : 0;
#ifdef I40E_FCOE
		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
			dev_kfree_skb_any(skb);
			continue;
		}
#endif
		skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_desc->wb.qword1.status_error_len = 0;

	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}
/**
 * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
 * @rx_ring:  rx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Returns number of packets cleaned
 **/
static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	struct i40e_vsi *vsi = rx_ring->vsi;
	union i40e_rx_desc *rx_desc;
	u32 rx_error, rx_status;
	u16 rx_packet_len;
	u8 rx_ptype;
	u64 qword;
	u16 i;

	do {
		struct i40e_rx_buffer *rx_bi;
		struct sk_buff *skb;
		u16 vlan_tag;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		i = rx_ring->next_to_clean;
		rx_desc = I40E_RX_DESC(rx_ring, i);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			    I40E_RXD_QW1_STATUS_SHIFT;

		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring, rx_desc);
			I40E_RX_INCREMENT(rx_ring, i);
			continue;
		}

		rx_bi = &rx_ring->rx_bi[i];
		skb = rx_bi->skb;
		prefetch(skb->data);

		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;

		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
			   I40E_RXD_QW1_ERROR_SHIFT;
		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);

		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;

		rx_bi->skb = NULL;
		cleaned_count++;

		/* Get the header and possibly the whole packet
		 * If this is an skb from previous receive dma will be 0
		 */
		skb_put(skb, rx_packet_len);
		dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
				 DMA_FROM_DEVICE);
		rx_bi->dma = 0;

		I40E_RX_INCREMENT(rx_ring, i);

		if (unlikely(
		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			rx_ring->rx_stats.non_eop_descs++;
			continue;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);
			/* TODO: shouldn't we increment a counter indicating the
			 * drop?
			 */
			continue;
		}

		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
			     i40e_ptype_to_hash(rx_ptype));
		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
					     I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
			rx_ring->last_rx_timestamp = jiffies;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);

		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
			 : 0;
#ifdef I40E_FCOE
		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
			dev_kfree_skb_any(skb);
			continue;
		}
#endif
		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_desc->wb.qword1.status_error_len = 0;
	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}
/**
 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work the driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/
int i40e_napi_poll(struct napi_struct *napi, int budget)
{
	struct i40e_q_vector *q_vector =
			       container_of(napi, struct i40e_q_vector, napi);
	struct i40e_vsi *vsi = q_vector->vsi;
	struct i40e_ring *ring;
	bool clean_complete = true;
	bool arm_wb = false;
	int budget_per_ring;
	int cleaned;

	if (test_bit(__I40E_DOWN, &vsi->state)) {
		napi_complete(napi);
		return 0;
	}

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	i40e_for_each_ring(ring, q_vector->tx) {
		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
		arm_wb |= ring->arm_wb;
	}

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
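	/* e.g. a NAPI budget of 64 split across a q_vector with 4 ring pairs
	 * gives each Rx ring a budget of 16; with more than 64 ring pairs the
	 * max() clamp keeps the per-ring budget at 1
	 */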
	i40e_for_each_ring(ring, q_vector->rx) {
		if (ring_is_ps_enabled(ring))
			cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
		else
			cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
		/* if we didn't clean as many as budgeted, we must be done */
		clean_complete &= (budget_per_ring != cleaned);
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
		if (arm_wb)
			i40e_force_wb(vsi, q_vector);
		return budget;
	}

	/* Work is done so exit the polling mode and re-enable the interrupt */
	napi_complete(napi);
	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
	    ITR_IS_DYNAMIC(vsi->tx_itr_setting))
		i40e_update_dynamic_itr(q_vector);

	if (!test_bit(__I40E_DOWN, &vsi->state)) {
		if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
			i40e_irq_dynamic_enable(vsi,
					q_vector->v_idx + vsi->base_vector);
		} else {
			struct i40e_hw *hw = &vsi->back->hw;
			/* We re-enable the queue 0 cause, but
			 * don't worry about dynamic_enable
			 * because we left it on for the other
			 * possible interrupts during napi
			 */
			u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
			qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
			wr32(hw, I40E_QINT_RQCTL(0), qval);

			qval = rd32(hw, I40E_QINT_TQCTL(0));
			qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
			wr32(hw, I40E_QINT_TQCTL(0), qval);

			i40e_irq_dynamic_enable_icr0(vsi->back);
		}
	}
	return 0;
}
/**
 * i40e_atr - Add a Flow Director ATR filter
 * @tx_ring:  ring to add programming descriptor to
 * @skb:      send buffer
 * @tx_flags: send tx flags
 * @protocol: wire protocol
 **/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, __be16 protocol)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	unsigned int hlen;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* make sure ATR is enabled */
	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	/* if sampling is disabled do nothing */
	if (!tx_ring->atr_sample_rate)
		return;

	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
		return;

	if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
		/* snag network header to get L4 type and address */
		hdr.network = skb_network_header(skb);

		/* Currently only IPv4/IPv6 with TCP is supported
		 * access ihl as u8 to avoid unaligned access on ia64
		 */
		if (tx_flags & I40E_TX_FLAGS_IPV4)
			hlen = (hdr.network[0] & 0x0F) << 2;
		else if (protocol == htons(ETH_P_IPV6))
			hlen = sizeof(struct ipv6hdr);
		else
			return;
	} else {
		hdr.network = skb_inner_network_header(skb);
		hlen = skb_inner_network_header_len(skb);
	}

	/* Currently only IPv4/IPv6 with TCP is supported
	 * Note: tx_flags gets modified to reflect inner protocols in
	 * tx_enable_csum function if encap is enabled.
	 */
	if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
	    (hdr.ipv4->protocol != IPPROTO_TCP))
		return;
	else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
		 (hdr.ipv6->nexthdr != IPPROTO_TCP))
		return;

	th = (struct tcphdr *)(hdr.network + hlen);

	/* Due to lack of space, no more new filters can be programmed */
	if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	tx_ring->atr_count++;

	/* sample on all syn/fin/rst packets or once every atr sample rate */
	if (!th->fin &&
	    !th->syn &&
	    !th->rst &&
	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
		return;

	tx_ring->atr_count = 0;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
		      I40E_TXD_FLTR_QW0_QINDEX_MASK;
	flex_ptype |= (protocol == htons(ETH_P_IP)) ?
		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= (th->fin || th->rst) ?
		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
		     I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
	dtype_cmd |=
		((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		I40E_TXD_FLTR_QW1_CNTINDEX_MASK;

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(0);
}
/**
 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @skb:      send buffer
 * @tx_ring:  ring to send buffer on
 * @flags:    the tx flags to be set
 *
 * Checks the skb and sets up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code to indicate the frame should be dropped upon error,
 * otherwise returns 0 to indicate the flags have been set properly.
 **/
#ifdef I40E_FCOE
int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
			       struct i40e_ring *tx_ring,
			       u32 *flags)
#else
static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
				      struct i40e_ring *tx_ring,
				      u32 *flags)
#endif
{
	__be16 protocol = skb->protocol;
	u32 tx_flags = 0;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling.  In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		goto out;
	}

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN, check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return -EINVAL;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
	}

	if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
		goto out;

	/* Insert 802.1p priority into VLAN header */
	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
	    (skb->priority != TC_PRIO_CONTROL)) {
		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
			    I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;
			int rc;

			rc = skb_cow_head(skb, 0);
			if (rc < 0)
				return rc;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 I40E_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
		}
	}

out:
	*flags = tx_flags;
	return 0;
}
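
/* Worked example (illustrative): with DCB enabled, a SW VLAN frame tagged
 * VID 100 at skb->priority 5 has its 802.1p bits rewritten roughly as:
 *
 *	tx_flags = (100 << I40E_TX_FLAGS_VLAN_SHIFT) | I40E_TX_FLAGS_SW_VLAN;
 *	tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
 *	tx_flags |= (5 & 0x7) << I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
 *
 * so the TCI written back into the header carries PCP 5 with VID 100. The
 * VID/priority values are made up; the shifts and masks are defined in
 * i40e_txrx.h.
 */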
/**
 * i40e_tso - set up the tso context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @hdr_len:  ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: ptr to u64 object
 * @cd_tunneling: ptr to context descriptor bits
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
		    u32 *cd_tunneling)
{
	u32 cd_cmd, cd_tso_len, cd_mss;
	struct ipv6hdr *ipv6h;
	struct tcphdr *tcph;
	struct iphdr *iph;
	u32 l4len;
	int err;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);

	if (iph->version == 4) {
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 0, IPPROTO_TCP, 0);
	} else if (ipv6h->version == 6) {
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		ipv6h->payload_len = 0;
		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					       0, IPPROTO_TCP, 0);
	}

	l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
	*hdr_len = (skb->encapsulation
		    ? (skb_inner_transport_header(skb) - skb->data)
		    : skb_transport_offset(skb)) + l4len;

	/* find the field values */
	cd_cmd = I40E_TX_CTX_DESC_TSO;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = skb_shinfo(skb)->gso_size;
	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
				((u64)cd_tso_len <<
				 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
				((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	return 1;
}
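
/* Worked example (illustrative): a 64 KB GSO send with 14 B Ethernet + 20 B
 * IPv4 + 20 B TCP headers and gso_size 1448 gives hdr_len = 54,
 * cd_tso_len = 65536 - 54 = 65482 and cd_mss = 1448, so the hardware cuts
 * the payload into DIV_ROUND_UP(65482, 1448) = 46 wire segments from a
 * single descriptor chain. The pseudo header checksum seeded into
 * tcph->check deliberately uses length 0, since the length differs per
 * generated segment and is folded in by the hardware.
 */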
/**
 * i40e_tsyn - set up the tsyn context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @cd_type_cmd_tso_mss: ptr to u64 object
 *
 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
 **/
static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
{
	struct i40e_pf *pf;

	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return 0;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (tx_flags & I40E_TX_FLAGS_TSO)
		return 0;

	/* only timestamp the outbound packet if the user has requested it and
	 * we are not already transmitting a packet to be timestamped
	 */
	pf = i40e_netdev_to_pf(tx_ring->netdev);
	if (!(pf->flags & I40E_FLAG_PTP))
		return 0;

	if (pf->ptp_tx &&
	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		pf->ptp_tx_skb = skb_get(skb);
	} else {
		return 0;
	}

	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
				I40E_TXD_CTX_QW1_CMD_SHIFT;

	return 1;
}
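
/* Note (illustrative): only one Tx timestamp can be in flight at a time;
 * __I40E_PTP_TX_IN_PROGRESS acts as a one-slot lock. A ptp4l sync burst,
 * for example, gets one timestamped skb while the rest go out untimestamped
 * until the completion path (i40e_ptp_tx_hwtstamp() in i40e_ptp.c) releases
 * the bit and drops the skb reference taken here.
 */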
/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/
static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
				u32 *td_cmd, u32 *td_offset,
				struct i40e_ring *tx_ring,
				u32 *cd_tunneling)
{
	struct ipv6hdr *this_ipv6_hdr;
	unsigned int this_tcp_hdrlen;
	struct iphdr *this_ip_hdr;
	u32 network_hdr_len;
	u8 l4_hdr = 0;
	u32 l4_tunnel = 0;

	if (skb->encapsulation) {
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
			break;
		default:
			return;
		}
		network_hdr_len = skb_inner_network_header_len(skb);
		this_ip_hdr = inner_ip_hdr(skb);
		this_ipv6_hdr = inner_ipv6_hdr(skb);
		this_tcp_hdrlen = inner_tcp_hdrlen(skb);

		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
			if (*tx_flags & I40E_TX_FLAGS_TSO) {
				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
				ip_hdr(skb)->check = 0;
			} else {
				*cd_tunneling |=
					 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
			}
		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
			if (*tx_flags & I40E_TX_FLAGS_TSO)
				ip_hdr(skb)->check = 0;
		}

		/* Now set the ctx descriptor fields */
		*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
				   I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT      |
				   l4_tunnel                             |
				   ((skb_inner_network_offset(skb) -
					skb_transport_offset(skb)) >> 1) <<
				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
		if (this_ip_hdr->version == 6) {
			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
			*tx_flags |= I40E_TX_FLAGS_IPV6;
		}
	} else {
		network_hdr_len = skb_network_header_len(skb);
		this_ip_hdr = ip_hdr(skb);
		this_ipv6_hdr = ipv6_hdr(skb);
		this_tcp_hdrlen = tcp_hdrlen(skb);
	}

	/* Enable IP checksum offloads */
	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
		l4_hdr = this_ip_hdr->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (*tx_flags & I40E_TX_FLAGS_TSO) {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
			this_ip_hdr->check = 0;
		} else {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
		}
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
		l4_hdr = this_ipv6_hdr->nexthdr;
		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	}
	/* words in MACLEN + dwords in IPLEN + dwords in L4Len */
	*td_offset |= (skb_network_offset(skb) >> 1) <<
		       I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

	/* Enable L4 checksum offloads */
	switch (l4_hdr) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		*td_offset |= (this_tcp_hdrlen >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		*td_offset |= (sizeof(struct sctphdr) >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		*td_offset |= (sizeof(struct udphdr) >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		break;
	}
}
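
/* Worked example (illustrative): for a plain IPv4/TCP frame with no options,
 * MACLEN = 14 bytes -> 7 words, IPLEN = 20 bytes -> 5 dwords and
 * L4LEN = 20 bytes -> 5 dwords, so the offsets pack as:
 *
 *	td_offset = (7 << I40E_TX_DESC_LENGTH_MACLEN_SHIFT) |
 *		    (5 << I40E_TX_DESC_LENGTH_IPLEN_SHIFT) |
 *		    (5 << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT);
 *
 * matching the ">> 1" (half-word) and ">> 2" (dword) conversions above.
 */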
/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring:  ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct i40e_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
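
/* Note (illustrative): a frame needing no TSO, TSYN, tunneling or L2TAG2
 * state leaves cd_type_cmd_tso_mss at its I40E_TX_DESC_DTYPE_CONTEXT
 * initializer (see i40e_xmit_frame_ring below), so the early return skips
 * the context descriptor entirely and the packet consumes only data
 * descriptors.
 */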
/**
 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}
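
/* Note (illustrative): the stop/smp_mb()/recheck sequence closes the race
 * with the Tx cleanup path: if descriptors are freed between the caller's
 * first check and netif_stop_subqueue(), the recheck sees the new room and
 * restarts the queue instead of leaving it stopped with no completion left
 * to wake it.
 */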
/**
 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
#ifdef I40E_FCOE
int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#else
static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#endif
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}
/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @tx_flags: collected send information
 * @hdr_len:  size of the packet header
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
			       const u8 hdr_len)
{
	struct skb_frag_struct *frag;
	bool linearize = false;
	unsigned int size = 0;
	u16 num_frags;
	u16 gso_segs;

	num_frags = skb_shinfo(skb)->nr_frags;
	gso_segs = skb_shinfo(skb)->gso_segs;

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
		u16 j = 0;

		if (num_frags < (I40E_MAX_BUFFER_TXD))
			goto linearize_chk_done;
		/* try the simple math, if we have too many frags per segment */
		if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
		    I40E_MAX_BUFFER_TXD) {
			linearize = true;
			goto linearize_chk_done;
		}
		frag = &skb_shinfo(skb)->frags[0];
		/* we might still have more fragments per segment */
		do {
			size += skb_frag_size(frag);
			frag++; j++;
			if (j == I40E_MAX_BUFFER_TXD) {
				if (size < skb_shinfo(skb)->gso_size) {
					linearize = true;
					break;
				}
				j = 1;
				size -= skb_shinfo(skb)->gso_size;
				if (size)
					j++;
				size += skb_frag_size(frag);
				frag++;
			}
			num_frags--;
		} while (num_frags);
	} else {
		if (num_frags >= I40E_MAX_BUFFER_TXD)
			linearize = true;
	}

linearize_chk_done:
	return linearize;
}
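
/* Worked example (illustrative): a TSO skb with nr_frags = 17 and
 * gso_segs = 2 passes the first check (17 >= 8), then fails the simple
 * math: DIV_ROUND_UP(17 + 2, 2) = 10 > I40E_MAX_BUFFER_TXD (8), so the
 * caller must linearize. A 9-frag non-TSO skb linearizes immediately via
 * the else branch, since a single wire packet may span at most 8 buffers.
 */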
/**
 * i40e_tx_map - Build the Tx descriptor
 * @tx_ring:  ring to send buffer on
 * @skb:      send buffer
 * @first:    first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len:  size of the packet header
 * @td_cmd:   the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/
#ifdef I40E_FCOE
void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
		 struct i40e_tx_buffer *first, u32 tx_flags,
		 const u8 hdr_len, u32 td_cmd, u32 td_offset)
#else
static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			struct i40e_tx_buffer *first, u32 tx_flags,
			const u8 hdr_len, u32 td_cmd, u32 td_offset)
#endif
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	struct skb_frag_struct *frag;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;
	u16 gso_segs;

	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
		gso_segs = skb_shinfo(skb)->gso_segs;
	else
		gso_segs = 1;

	/* multiply data chunks by size of headers */
	first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
	first->gso_segs = gso_segs;
	first->skb = skb;
	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_bi = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		tx_desc->buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   I40E_MAX_DATA_PER_TXD, td_tag);

			tx_desc++;
			i++;
			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += I40E_MAX_DATA_PER_TXD;
			size -= I40E_MAX_DATA_PER_TXD;

			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;
		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}

	/* Place RS bit on last descriptor of any packet that spans across the
	 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
	 */
	if (((i & WB_STRIDE) != WB_STRIDE) &&
	    (first <= &tx_ring->tx_bi[i]) &&
	    (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
		tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag) |
			cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
					 I40E_TXD_QW1_CMD_SHIFT);
	} else {
		tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag) |
			cpu_to_le64((u64)I40E_TXD_CMD <<
					 I40E_TXD_QW1_CMD_SHIFT);
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
						 tx_ring->queue_index),
			     first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
	/* notify HW of packet */
	if (!skb->xmit_more ||
	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						   tx_ring->queue_index)))
		writel(i, tx_ring->tail);

	return;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
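
/* Worked example (illustrative): a skb with a 1 KB linear head and two 4 KB
 * page frags (each under I40E_MAX_DATA_PER_TXD) consumes three data
 * descriptors; only the last carries EOP. The WB_STRIDE test then decides
 * whether to also set RS: packets that fit entirely inside the current
 * 4-descriptor stride get EOP alone, so the hardware writes back completion
 * status roughly once per stride rather than once per packet.
 */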
/**
 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since
 * we need at least one descriptor.
 **/
#ifdef I40E_FCOE
int i40e_xmit_descriptor_count(struct sk_buff *skb,
			       struct i40e_ring *tx_ring)
#else
static int i40e_xmit_descriptor_count(struct sk_buff *skb,
				      struct i40e_ring *tx_ring)
#endif
{
	unsigned int f;
	int count = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	count += TXD_USE_COUNT(skb_headlen(skb));
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return 0;
	}
	return count;
}
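
/* Worked example (illustrative): three 4 KB frags plus a 256-byte head give
 * count = 3 * TXD_USE_COUNT(4096) + TXD_USE_COUNT(256) = 4, since each
 * buffer fits in one I40E_MAX_DATA_PER_TXD chunk. The ring must then have
 * count + 4 + 1 = 9 free descriptors (data + head-cacheline gap + context)
 * before the frame is committed.
 */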
/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tsyn;
	int tso;

	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
		return NETDEV_TX_BUSY;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(tx_ring, skb, &hdr_len,
		       &cd_type_cmd_tso_mss, &cd_tunneling);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

	if (i40e_chk_linearize(skb, tx_flags, hdr_len))
		if (skb_linearize(skb))
			goto out_drop;

	skb_tx_timestamp(skb);

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	/* Always offload the checksum, since it's in the data descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_flags |= I40E_TX_FLAGS_CSUM;

		i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				    tx_ring, &cd_tunneling);
	}

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags, protocol);

	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		    td_cmd, td_offset);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
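
/* Note (illustrative): the drop path also returns NETDEV_TX_OK; once the
 * skb has been freed with dev_kfree_skb_any() the stack must not retry it.
 * Only the up-front descriptor-count check may return NETDEV_TX_BUSY,
 * because at that point nothing has been consumed or modified yet.
 */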
/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb:    send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return i40e_xmit_frame_ring(skb, tx_ring);
}
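
/* Note (illustrative): this function serves as the driver's .ndo_start_xmit
 * hook (wired up in i40e_main.c), so the stack has already selected the Tx
 * queue via skb->queue_mapping before it runs; frames shorter than
 * I40E_MIN_TX_LEN are padded here because the hardware cannot pad them.
 */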