/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ena_eth_com.h"

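/* Return the next receive completion descriptor if the device has
 * completed it (its phase bit matches the expected phase), or NULL if
 * no new completion is available at the current head.
 */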
static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
		+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

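/* Return a pointer to the next free submission descriptor in a
 * host-memory (regular) SQ, based on the masked tail index.
 */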
static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

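/* Copy a completed bounce-buffer line into the device's LLQ memory at
 * the current tail, consuming one entry from the tx-burst budget when
 * the device limits burst size, then advance the tail (toggling the
 * phase bit on wrap-around).
 */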
static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
						     u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;

	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (!io_sq->entries_in_tx_burst_left) {
			ena_trc_err("Error: trying to write an llq entry to a full llq entries cache\n");
			return ENA_COM_NO_SPACE;
		}

		io_sq->entries_in_tx_burst_left--;
		ena_trc_dbg("decreasing entries_in_tx_burst_left of queue %d to %d\n",
			    io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is complete. Copy it to the device */
	ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
				bounce_buffer,
				llq_info->desc_list_entry_size);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}

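/* For LLQ mode, copy the packet header into the current bounce buffer,
 * placed after the descriptors that precede it in the entry. No-op for
 * host-memory queues.
 */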
static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
						 u8 *header_src,
						 u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
		ena_trc_err("trying to write a header larger than the llq entry can accommodate\n");
		return ENA_COM_FAULT;
	}

	if (unlikely(!bounce_buffer)) {
		ena_trc_err("bounce buffer is NULL\n");
		return ENA_COM_FAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

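/* Return the next descriptor slot inside the current bounce buffer and
 * account for it in the packet control state.
 */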
static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		ena_trc_err("bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

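/* Flush a partially filled bounce buffer to the device and start a
 * fresh one. No-op for host-memory queues.
 */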
static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ENA_COM_OK;

	/* bounce buffer was used, so write it and get a new one */
	if (pkt_ctrl->idx) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc))
			return rc;

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return ENA_COM_OK;
}

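/* Dispatch descriptor allocation according to the queue placement
 * policy (LLQ vs. host memory).
 */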
static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

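/* Advance the LLQ state after writing a descriptor: when the current
 * bounce-buffer line is exhausted, write it to the device and set up
 * the next line.
 */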
static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc))
			return rc;

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return ENA_COM_OK;
}

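/* Advance the SQ tail after a descriptor has been written, toggling
 * the phase bit on wrap-around. LLQ queues are handled separately.
 */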
static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}

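/* Translate a completion-descriptor index into a pointer within the
 * CQ's descriptor area.
 */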
static inline struct ena_eth_io_rx_cdesc_base *
ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
			idx * io_cq->cdesc_entry_size_in_bytes);
}

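/* Walk the CQ until the descriptor marked LAST is found and return the
 * number of completion descriptors that make up the packet, or 0 if
 * the packet is not fully received yet (partial counts are accumulated
 * across calls).
 */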
static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
					   u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		ena_trc_dbg("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
			    io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

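/* Return true if the packet's TX metadata differs from the metadata
 * currently cached on the SQ, i.e. a new meta descriptor is needed.
 */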
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	int rc;

	if (ena_tx_ctx->meta_valid) {
		rc = memcmp(&io_sq->cached_tx_meta,
			    &ena_tx_ctx->ena_meta,
			    sizeof(struct ena_com_tx_meta));

		if (unlikely(rc != 0))
			return true;
	}

	return false;
}

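/* Build an extended TX meta descriptor from the packet context, cache
 * the metadata on the SQ, and advance the tail.
 */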
static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
							 struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	meta_desc = get_sq_desc(io_sq);
	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= (ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
	meta_desc->len_ctrl |= (io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= (ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	/* Cache the meta desc */
	memcpy(&io_sq->cached_tx_meta, ena_meta,
	       sizeof(struct ena_com_tx_meta));

	return ena_com_sq_update_tail(io_sq);
}

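/* Extract the L3/L4 protocol, checksum, hash and fragmentation flags
 * from a completion descriptor into the RX context.
 */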
static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
					struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash: %d frag: %d cdesc_status: %x\n",
		    ena_rx_ctx->l3_proto,
		    ena_rx_ctx->l4_proto,
		    ena_rx_ctx->l3_csum_err,
		    ena_rx_ctx->l4_csum_err,
		    ena_rx_ctx->hash,
		    ena_rx_ctx->frag,
		    cdesc->status);
}

/*****************************************************************************/
/*****************************        API        *****************************/
/*****************************************************************************/

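/* Check whether the packet would overflow the remaining tx-burst
 * budget of an LLQ, in which case the caller must ring the doorbell
 * before queuing it.
 */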
bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
				struct ena_com_tx_ctx *ena_tx_ctx)
{
	u16 num_descs;
	int num_entries_needed;
	int descs_after_first_entry;
	bool have_meta;
	struct ena_com_llq_info *llq_info;

	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	num_entries_needed = 1;
	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;
	have_meta = ena_tx_ctx->meta_valid &&
		    ena_com_meta_desc_changed(io_sq, ena_tx_ctx);

	if (have_meta)
		++num_descs;

	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	ena_trc_dbg("queue: %d num_descs: %d num_entries_needed: %d\n",
		    io_sq->qid, num_descs, num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}

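/* Convert a TX context into device descriptors on the SQ: push the
 * header (LLQ), emit a meta descriptor if the metadata changed, then
 * one descriptor per buffer. On success, *nb_hw_desc holds the number
 * of descriptors consumed.
 */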
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
		 "wrong Q type");

	/* num_bufs + 1 for a potential meta desc */
	if (!ena_com_sq_have_enough_space(io_sq, num_bufs + 1)) {
		ena_trc_dbg("Not enough space in the tx queue\n");
		return ENA_COM_NO_MEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		ena_trc_err("header size is too large %d max header: %d\n",
			    header_len, io_sq->tx_max_header_size);
		return ENA_COM_INVAL;
	}

	if (unlikely((io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		     && !buffer_to_push))
		return ENA_COM_INVAL;

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
			ena_tx_ctx);
	if (have_meta) {
		rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
		if (unlikely(rc))
			return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have a meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= (header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 */
	desc->meta_ctrl |= (ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first desc shares the same desc as the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc))
				return rc;

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return ENA_COM_FAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= (io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* Set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc))
		return rc;

	rc = ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

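/* Retrieve one received packet from the CQ: collect its completion
 * descriptors into ena_rx_ctx->ena_bufs, advance the matching SQ's
 * consumption counter, and extract the RX flags from the last
 * descriptor. ena_rx_ctx->descs is 0 when no packet is ready.
 */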
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i;

	ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n",
		    io_cq->qid, nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n",
			    nb_hw_desc, ena_rx_ctx->max_bufs);
		return ENA_COM_NO_SPACE;
	}

	for (i = 0; i < nb_hw_desc; i++) {
		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

		ena_buf->len = cdesc->length;
		ena_buf->req_id = cdesc->req_id;
		ena_buf++;
	}

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__,
		    io_sq->qid, io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;
	return 0;
}

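/* Post a single receive buffer to the RX SQ as a standalone
 * (FIRST and LAST) descriptor.
 */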
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return ENA_COM_NO_SPACE;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
	desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

	desc->req_id = req_id;

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}

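/* Return true if the CQ holds no new completion descriptor. */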
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (cdesc)
		return false;
	else
		return true;
}