/*
 * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "ena_eth_com.h"
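/* I/O path helpers for the ENA Ethernet submission (SQ) and completion (CQ)
 * queues. Completion descriptors carry a phase bit: the expected phase value
 * flips on every wrap-around of the queue, so a descriptor whose phase
 * matches io_cq->phase is newly written by the device, while a stale
 * descriptor from the previous pass is rejected without any head/tail
 * exchange with the hardware.
 */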
static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;
	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));
	desc_phase = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}
static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;
	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}
static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
						     u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u16 dst_tail_mask;
	u32 dst_offset;
	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;
	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (!io_sq->entries_in_tx_burst_left) {
			ena_trc_err("Error: trying to write an llq entry to a full llq entries cache\n");
			return ENA_COM_NO_SPACE;
		}

		io_sq->entries_in_tx_burst_left--;
		ena_trc_dbg("decreasing entries_in_tx_burst_left of queue %d to %d\n",
			    io_sq->qid, io_sq->entries_in_tx_burst_left);
	}
	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
				bounce_buffer,
				llq_info->desc_list_entry_size);
	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}
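/* For LLQ, the packet header bytes are placed directly after the first
 * descs_num_before_header descriptors of the current bounce-buffer line.
 * Host-placement queues do not use the bounce buffer, so the copy is a
 * no-op for them.
 */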
static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
						 u8 *header_src,
						 u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;
	if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
		ena_trc_err("trying to write header larger than llq entry can accommodate\n");
		return ENA_COM_FAULT;
	}

	if (unlikely(!bounce_buffer)) {
		ena_trc_err("bounce buffer is NULL\n");
		return ENA_COM_FAULT;
	}
	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}
static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;
	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		ena_trc_err("bounce buffer is NULL\n");
		return NULL;
	}
	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}
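/* Flush the current bounce-buffer line to the device if any descriptor was
 * written into it, then reset the packet control state for the next packet.
 */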
static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ENA_COM_OK;

	/* bounce buffer was used, so write it and get a new one */
	if (pkt_ctrl->idx) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc))
			return rc;
		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return ENA_COM_OK;
}
static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}
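/* Advance the LLQ tail: once all descriptor slots of the current line are
 * consumed, the line is written to the device and a fresh bounce buffer is
 * prepared. The number of slots per line depends on the descriptor stride
 * control reported by the device.
 */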
static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;
	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc))
			return rc;

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return ENA_COM_OK;
}
static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}
static inline struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		idx * io_cq->cdesc_entry_size_in_bytes);
}
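/* Count the completion descriptors belonging to the next received packet,
 * i.e. walk the CQ until a descriptor with the LAST bit set is seen. A
 * packet may span several polling calls; the partial count is accumulated
 * in cur_rx_pkt_cdesc_count and 0 is returned until the packet completes.
 */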
static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
					   u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;
	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);
	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		ena_trc_dbg("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
			    io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}
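/* A TX meta descriptor only needs to be rewritten when its content differs
 * from the one cached for this SQ, saving a descriptor per packet in the
 * common case.
 */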
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	int rc;

	if (ena_tx_ctx->meta_valid) {
		rc = memcmp(&io_sq->cached_tx_meta,
			    &ena_tx_ctx->ena_meta,
			    sizeof(struct ena_com_tx_meta));

		if (unlikely(rc != 0))
			return true;
	}

	return false;
}
static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
							 struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	meta_desc = get_sq_desc(io_sq);
	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= (ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;
	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
	meta_desc->len_ctrl |= (io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= (ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	/* Cache the meta desc */
	memcpy(&io_sq->cached_tx_meta, ena_meta,
	       sizeof(struct ena_com_tx_meta));

	return ena_com_sq_update_tail(io_sq);
}
static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
					struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
	ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n",
		    ena_rx_ctx->l3_proto,
		    ena_rx_ctx->l4_proto,
		    ena_rx_ctx->l3_csum_err,
		    ena_rx_ctx->l4_csum_err,
		    ena_rx_ctx->hash,
		    ena_rx_ctx->frag,
		    cdesc->status);
}
/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/
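/* With LLQ burst control, the device limits how many entries may be written
 * between doorbells; this returns true when the next packet would not fit
 * into the remaining burst budget, so the caller must ring the doorbell
 * before preparing it.
 */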
bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
				struct ena_com_tx_ctx *ena_tx_ctx)
{
	int num_entries_needed;
	int descs_after_first_entry;
	bool have_meta;
	u16 num_descs;
	struct ena_com_llq_info *llq_info;
	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	num_entries_needed = 1;
	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;
	have_meta = ena_tx_ctx->meta_valid &&
		ena_com_meta_desc_changed(io_sq, ena_tx_ctx);

	if (have_meta)
		++num_descs;
	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	ena_trc_dbg("queue: %d num_descs: %d num_entries_needed: %d\n",
		    io_sq->qid, num_descs, num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}
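/* Build all descriptors for one TX packet: an optional meta descriptor, a
 * first descriptor carrying the header information, and one descriptor per
 * buffer. A minimal caller-side sketch (illustrative only; it assumes the
 * doorbell helper declared in ena_eth_com.h and a caller-owned tx_ctx):
 *
 *	int nb_hw_desc, rc;
 *
 *	if (ena_com_is_doorbell_needed(io_sq, &tx_ctx))
 *		ena_com_write_sq_doorbell(io_sq);
 *
 *	rc = ena_com_prepare_tx(io_sq, &tx_ctx, &nb_hw_desc);
 *	if (unlikely(rc))
 *		return rc;
 *
 *	ena_com_write_sq_doorbell(io_sq);
 */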
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;
	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
		 "wrong Q type");

	/* num_bufs +1 for potential meta desc */
	if (!ena_com_sq_have_enough_space(io_sq, num_bufs + 1)) {
		ena_trc_dbg("Not enough space in the tx queue\n");
		return ENA_COM_NO_MEM;
	}
	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		ena_trc_err("header size is too large %d max header: %d\n",
			    header_len, io_sq->tx_max_header_size);
		return ENA_COM_INVAL;
	}

	if (unlikely((io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		     && !buffer_to_push))
		return ENA_COM_INVAL;

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;
	have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
			ena_tx_ctx);
	if (have_meta) {
		rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
		if (unlikely(rc))
			return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}
	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;
	desc->buff_addr_hi_hdr_sz |= (header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
	/* Bits 0-9 of the req_id */
	desc->meta_ctrl |= (ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 of the req_id */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}
	for (i = 0; i < num_bufs; i++) {
		/* The first desc shares the same desc as the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc))
				return rc;

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return ENA_COM_FAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= (io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}
		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}
	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc))
		return rc;

	rc = ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}
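/* Retrieve one received packet from the CQ: collect its completion
 * descriptors, copy length/req_id into the caller's buffer array, advance
 * the matching SQ's next_to_comp, and extract the offload flags from the
 * last descriptor.
 */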
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i;
	ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}
	ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n",
		    io_cq->qid, nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n",
			    nb_hw_desc, ena_rx_ctx->max_bufs);
		return ENA_COM_NO_SPACE;
	}
	for (i = 0; i < nb_hw_desc; i++) {
		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

		ena_buf->len = cdesc->length;
		ena_buf->req_id = cdesc->req_id;
		ena_buf++;
	}
	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__,
		    io_sq->qid, io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;
	return 0;
}
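/* Post a single RX buffer to the SQ. Each buffer gets its own descriptor
 * (FIRST and LAST are both set) tagged with the caller's req_id, which the
 * device echoes back in the matching completion.
 */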
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return ENA_COM_NO_SPACE;
	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
	desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

	desc->req_id = req_id;
	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (cdesc)
		return false;
	else
		return true;
}