/*
 * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
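/*
 * ena_eth_com.c - I/O path helpers for the ENA communication layer:
 * building TX/RX submission queue (SQ) descriptors and parsing Ethernet
 * completion queue (CQ) descriptors.
 */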
#include "ena_eth_com.h"
static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u32 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE(cdesc->status) &
		      ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		     ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	return cdesc;
}
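/*
 * Note on the scheme above: q_depth is a power of two, so head & (q_depth - 1)
 * is a cheap modulo that locates the current slot in the ring. Position alone
 * cannot distinguish a freshly written descriptor from a stale one left over
 * from the previous lap, so the device flips the phase bit it writes on every
 * wrap of the ring; a completion is valid only once its phase bit matches the
 * phase the driver expects (see ena_com_cq_inc_head() below, which toggles
 * io_cq->phase on wrap).
 */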
static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}
static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}
static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
	u32 offset = tail_masked * io_sq->desc_entry_size;

	/* In case this queue isn't an LLQ */
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return;

	memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
		    io_sq->desc_addr.virt_addr + offset,
		    io_sq->desc_entry_size);
}
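/*
 * The copy above only matters for LLQ (low-latency queue) placement, where
 * the device reads descriptors out of its own memory window: the host-side
 * descriptor at the current tail is mirrored over the bus with memcpy_toio().
 * For ENA_ADMIN_PLACEMENT_POLICY_HOST the device DMA-reads descriptors from
 * host memory, so the function returns without copying.
 */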
static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;
}
static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
				       u8 *head_src, u16 header_len)
{
	u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
	u8 __iomem *dev_head_addr =
		io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return 0;

	if (unlikely(!io_sq->header_addr)) {
		ena_trc_err("Push buffer header ptr is NULL\n");
		return ENA_COM_INVAL;
	}

	memcpy_toio(dev_head_addr, head_src, header_len);

	return 0;
}
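/*
 * Header push is likewise meaningful only for device placement: for
 * ENA_ADMIN_PLACEMENT_POLICY_HOST the function above returns immediately,
 * otherwise it writes the packet header into the per-tail slot of the
 * device header buffer (io_sq->header_addr).
 */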
static inline struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		idx * io_cq->cdesc_entry_size_in_bytes);
}
static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
					   u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE(cdesc->status) &
			ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		ena_trc_dbg("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
			    io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}
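/*
 * A packet may span several cdescs and may also arrive across several calls:
 * descriptors are consumed until one carrying the LAST bit shows up. When the
 * tail of the packet hasn't arrived yet, the descriptors consumed so far are
 * banked in io_cq->cur_rx_pkt_cdesc_count and 0 is returned; the next call
 * resumes the count from there, and only a completed packet reports its full
 * descriptor count to the caller.
 */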
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	int rc;

	if (ena_tx_ctx->meta_valid) {
		rc = memcmp(&io_sq->cached_tx_meta,
			    &ena_tx_ctx->ena_meta,
			    sizeof(struct ena_com_tx_meta));

		if (unlikely(rc != 0))
			return true;
	}

	return false;
}
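/*
 * Together with ena_com_create_and_store_tx_meta_desc() below, this forms a
 * small cache: a TX meta descriptor is emitted only when its content differs
 * from the last one written, as remembered in io_sq->cached_tx_meta.
 */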
static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
	struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	meta_desc = get_sq_desc(io_sq);
	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= (ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
	meta_desc->len_ctrl |= (io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= (ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	/* Cache the meta desc */
	memcpy(&io_sq->cached_tx_meta, ena_meta,
	       sizeof(struct ena_com_tx_meta));

	ena_com_copy_curr_sq_desc_to_dev(io_sq);
	ena_com_sq_update_tail(io_sq);
}
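/*
 * Worked example of the MSS split above (value hypothetical): for
 * mss = 9000 = 0x2328, bits 0-9 (0x328) are placed in word2 through
 * MSS_LO and bits 10-13 (0x8) in len_ctrl through MSS_HI, so the 14-bit
 * MSS value straddles the two descriptor words.
 */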
static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
					struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
	ena_rx_ctx->l4_csum_err =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash: 0x%x frag: %d cdesc_status: %x\n",
		    ena_rx_ctx->l3_proto,
		    ena_rx_ctx->l4_proto,
		    ena_rx_ctx->l3_csum_err,
		    ena_rx_ctx->l4_csum_err,
		    ena_rx_ctx->hash,
		    ena_rx_ctx->frag,
		    cdesc->status);
}
/*****************************************************************************/
/*****************************     API      *********************************/
/*****************************************************************************/
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *push_header = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	int total_desc, i, rc;
	bool have_meta;
	u64 addr_hi;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
		 "wrong Q type");

	/* num_bufs +1 for potential meta desc */
	if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
		ena_trc_err("Not enough space in the tx queue\n");
		return ENA_COM_NO_MEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		ena_trc_err("header size is too large %d max header: %d\n",
			    header_len, io_sq->tx_max_header_size);
		return ENA_COM_INVAL;
	}

	/* start with pushing the header (if needed) */
	rc = ena_com_write_header(io_sq, push_header, header_len);
	if (unlikely(rc))
		return rc;

	have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
			ena_tx_ctx);
	if (have_meta)
		ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		*nb_hw_desc = have_meta ? 0 : 1;
		return 0;
	}

	desc = get_sq_desc(io_sq);
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= (header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 of the req_id */
	desc->meta_ctrl |= (ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 of the req_id */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first desc shares the same desc as the header */
		if (likely(i != 0)) {
			ena_com_copy_curr_sq_desc_to_dev(io_sq);
			ena_com_sq_update_tail(io_sq);

			desc = get_sq_desc(io_sq);
			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= (io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	ena_com_copy_curr_sq_desc_to_dev(io_sq);

	ena_com_sq_update_tail(io_sq);

	total_desc = ENA_MAX16(num_bufs, 1);
	total_desc += have_meta ? 1 : 0;

	*nb_hw_desc = total_desc;
	return 0;
}
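/*
 * Usage sketch (hypothetical caller, not part of this file): a TX datapath
 * would map the packet buffers into tx_ctx.ena_bufs and then do roughly
 *
 *	int nb_hw_desc;
 *
 *	rc = ena_com_prepare_tx(io_sq, &tx_ctx, &nb_hw_desc);
 *	if (unlikely(rc))
 *		goto err_unmap;
 *	ena_com_write_sq_doorbell(io_sq);
 *
 * keeping nb_hw_desc so the completion path can account for the descriptors
 * this packet consumed.
 */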
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i;

	ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n",
		    io_cq->qid, nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n",
			    nb_hw_desc, ena_rx_ctx->max_bufs);
		return ENA_COM_NO_SPACE;
	}

	for (i = 0; i < nb_hw_desc; i++) {
		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

		ena_buf->len = cdesc->length;
		ena_buf->req_id = cdesc->req_id;
		ena_buf++;
	}

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__,
		    io_sq->qid, io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;
	return 0;
}
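/*
 * Usage sketch (hypothetical caller): an RX poll loop calls
 *
 *	rc = ena_com_rx_pkt(io_cq, io_sq, &rx_ctx);
 *
 * and stops when rx_ctx.descs == 0 (no complete packet available yet).
 * On success, rx_ctx.ena_bufs[0..descs-1] carry the req_id and length of
 * each buffer of the packet, and the flag fields describe its checksums
 * and protocols.
 */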
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 "wrong Q type");

	if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
		return ENA_COM_NO_SPACE;

	desc = get_sq_desc(io_sq);
	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
	desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

	desc->req_id = req_id;

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	ena_com_sq_update_tail(io_sq);

	return 0;
}
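/*
 * Usage sketch (hypothetical caller): an RX refill path posts buffers one
 * at a time and rings the doorbell once per batch, e.g.
 *
 *	rc = ena_com_add_single_rx_desc(io_sq, &ena_buf, req_id);
 *	if (unlikely(rc))
 *		break;
 *	...
 *	ena_com_write_sq_doorbell(io_sq);
 */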
int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected, it means that the device hasn't updated
	 * this completion yet.
	 */
	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return ENA_COM_TRY_AGAIN;

	if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
		ena_trc_err("Invalid req id %d\n", cdesc->req_id);
		return ENA_COM_INVAL;
	}

	ena_com_cq_inc_head(io_cq);

	*req_id = READ_ONCE(cdesc->req_id);

	return 0;
}
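/*
 * Usage sketch (hypothetical caller): TX cleanup reaps completions until
 * the device stops producing them, e.g.
 *
 *	while (ena_com_tx_comp_req_id_get(io_cq, &req_id) == 0)
 *		release the buffer recorded under req_id;
 *
 * ENA_COM_TRY_AGAIN indicates the next cdesc's phase bit hasn't flipped
 * yet, i.e. there is nothing more to reap right now.
 */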
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (cdesc)
		return false;
	else
		return true;
}