/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_status.h"
#include "i40iw_hmc.h"

#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include "i40iw_puda.h"

static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
			      struct i40iw_puda_buf *buf);
static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid);
static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
						      *rsrc, bool initial);
static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);

/**
 * i40iw_puda_get_listbuf - get buffer from puda list
 * @list: list to use for buffers (ILQ or IEQ)
 */
static struct i40iw_puda_buf *i40iw_puda_get_listbuf(struct list_head *list)
{
	struct i40iw_puda_buf *buf = NULL;

	if (!list_empty(list)) {
		buf = (struct i40iw_puda_buf *)list->next;
		list_del((struct list_head *)&buf->list);
	}
	return buf;
}

/**
 * i40iw_puda_get_bufpool - return buffer from resource
 * @rsrc: resource to use for buffer
 */
struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc)
{
	struct i40iw_puda_buf *buf = NULL;
	struct list_head *list = &rsrc->bufpool;
	unsigned long flags;

	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
	buf = i40iw_puda_get_listbuf(list);
	if (buf)
		rsrc->avail_buf_count--;
	else
		rsrc->stats_buf_alloc_fail++;
	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
	return buf;
}

/**
 * i40iw_puda_ret_bufpool - return buffer to rsrc list
 * @rsrc: resource to use for buffer
 * @buf: buffer to return to resource
 */
void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,
			    struct i40iw_puda_buf *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
	list_add(&buf->list, &rsrc->bufpool);
	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
	rsrc->avail_buf_count++;
}

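/*
 * Editorial note on the buffer pool above: i40iw_puda_get_bufpool() and
 * i40iw_puda_ret_bufpool() bracket every list operation with bufpool_lock
 * because buffers move between the CQ completion path and the transmit
 * queuing path concurrently. avail_buf_count together with
 * stats_buf_alloc_fail indicates whether the pool was sized adequately
 * for the workload.
 */
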
/**
 * i40iw_puda_post_recvbuf - set wqe for rcv buffer
 * @rsrc: resource ptr
 * @wqe_idx: wqe index to use
 * @buf: puda buffer for rcv q
 * @initial: flag if during init time
 */
static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
				    struct i40iw_puda_buf *buf, bool initial)
{
	u64 *wqe;
	struct i40iw_sc_qp *qp = &rsrc->qp;
	u64 offset24 = 0;

	qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
	wqe = qp->qp_uk.rq_base[wqe_idx].elem;
	i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
		    "%s: wqe_idx= %d buf = %p wqe = %p\n", __func__,
		    wqe_idx, buf, wqe);
	if (!initial)
		get_64bit_val(wqe, 24, &offset24);

	offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
	set_64bit_val(wqe, 24, offset24);

	set_64bit_val(wqe, 0, buf->mem.pa);
	set_64bit_val(wqe, 8,
		      LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
	set_64bit_val(wqe, 24, offset24);
}

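/*
 * Editorial note: the 64-bit word at offset 24 of an RQ WQE carries the
 * valid bit. On reuse, the previous value is read back and inverted, so
 * each repost flips the descriptor's polarity; the intent, as the
 * polarity handling elsewhere in this file suggests, is that hardware
 * tracks the expected valid-bit value per pass over the ring. On the
 * initial posting (initial == true) the stale read is skipped and the
 * bit is simply set.
 */
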
/**
 * i40iw_puda_replenish_rq - post rcv buffers
 * @rsrc: resource to use for buffer
 * @initial: flag if during init time
 */
static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc *rsrc,
						      bool initial)
{
	u32 i;
	u32 invalid_cnt = rsrc->rxq_invalid_cnt;
	struct i40iw_puda_buf *buf = NULL;

	for (i = 0; i < invalid_cnt; i++) {
		buf = i40iw_puda_get_bufpool(rsrc);
		if (!buf)
			return I40IW_ERR_list_empty;
		i40iw_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf,
					initial);
		rsrc->rx_wqe_idx =
		    ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
		rsrc->rxq_invalid_cnt--;
	}
	return 0;
}

/**
 * i40iw_puda_alloc_buf - allocate mem for buffer
 * @dev: iwarp device
 * @length: length of buffer
 */
static struct i40iw_puda_buf *i40iw_puda_alloc_buf(struct i40iw_sc_dev *dev,
						   u32 length)
{
	struct i40iw_puda_buf *buf = NULL;
	struct i40iw_virt_mem buf_mem;
	enum i40iw_status_code ret;

	ret = i40iw_allocate_virt_mem(dev->hw, &buf_mem,
				      sizeof(struct i40iw_puda_buf));
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA,
			    "%s: error mem for buf\n", __func__);
		return NULL;
	}
	buf = (struct i40iw_puda_buf *)buf_mem.va;
	ret = i40iw_allocate_dma_mem(dev->hw, &buf->mem, length, 1);
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA,
			    "%s: error dma mem for buf\n", __func__);
		i40iw_free_virt_mem(dev->hw, &buf_mem);
		return NULL;
	}
	buf->buf_mem.va = buf_mem.va;
	buf->buf_mem.size = buf_mem.size;
	return buf;
}

/**
 * i40iw_puda_dele_buf - delete buffer back to system
 * @dev: iwarp device
 * @buf: buffer to free
 */
static void i40iw_puda_dele_buf(struct i40iw_sc_dev *dev,
				struct i40iw_puda_buf *buf)
{
	i40iw_free_dma_mem(dev->hw, &buf->mem);
	i40iw_free_virt_mem(dev->hw, &buf->buf_mem);
}

/**
 * i40iw_puda_get_next_send_wqe - return next wqe for processing
 * @qp: puda qp for wqe
 * @wqe_idx: wqe index for caller
 */
static u64 *i40iw_puda_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
{
	u64 *wqe = NULL;
	enum i40iw_status_code ret_code = 0;

	*wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;
	I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
	if (ret_code)
		return wqe;
	wqe = qp->sq_base[*wqe_idx].elem;

	return wqe;
}

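/*
 * Editorial note on SQ polarity: when the ring head wraps back to index
 * 0, software flips swqe_polarity, and every WQE built during the new
 * pass is stamped with the new polarity as its valid bit (see
 * i40iw_puda_send() below). That alternating value is how freshly
 * posted WQEs are distinguished from stale ones from the previous pass.
 */
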
/**
 * i40iw_puda_poll_info - poll cq for completion
 * @cq: cq for poll
 * @info: info return for successful completion
 */
static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq,
						   struct i40iw_puda_completion_info *info)
{
	u64 qword0, qword2, qword3;
	u64 *cqe;
	u64 comp_ctx;
	bool valid_bit;
	u32 major_err, minor_err;
	bool error;

	cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk);
	get_64bit_val(cqe, 24, &qword3);
	valid_bit = (bool)RS_64(qword3, I40IW_CQ_VALID);

	if (valid_bit != cq->cq_uk.polarity)
		return I40IW_ERR_QUEUE_EMPTY;

	i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32);
	error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
	if (error) {
		i40iw_debug(cq->dev, I40IW_DEBUG_PUDA, "%s receive error\n", __func__);
		major_err = (u32)(RS_64(qword3, I40IW_CQ_MAJERR));
		minor_err = (u32)(RS_64(qword3, I40IW_CQ_MINERR));
		info->compl_error = major_err << 16 | minor_err;
		return I40IW_ERR_CQ_COMPL_ERROR;
	}

	get_64bit_val(cqe, 0, &qword0);
	get_64bit_val(cqe, 16, &qword2);

	info->q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
	info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);

	get_64bit_val(cqe, 8, &comp_ctx);
	info->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
	info->wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);

	if (info->q_type == I40IW_CQE_QTYPE_RQ) {
		info->vlan_valid = (bool)RS_64(qword3, I40IW_VLAN_TAG_VALID);
		info->l4proto = (u8)RS_64(qword2, I40IW_UDA_L4PROTO);
		info->l3proto = (u8)RS_64(qword2, I40IW_UDA_L3PROTO);
		info->payload_len = (u16)RS_64(qword0, I40IW_UDA_PAYLOADLEN);
	}

	return 0;
}

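/*
 * Editorial note on the PUDA CQE layout as decoded above: qword3 (bytes
 * 24..31) carries the valid bit, error flag, major/minor error codes,
 * the SQ/RQ indicator and wqe_idx; qword2 carries the qp_id and, for RQ
 * completions, the L3/L4 protocol fields; qword0 carries the payload
 * length; bytes 8..15 hold the completion context pointer that maps the
 * CQE back to the software QP.
 */
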
/**
 * i40iw_puda_poll_completion - processes completion for cq
 * @dev: iwarp device
 * @cq: cq getting interrupt
 * @compl_err: return any completion err
 */
enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
						  struct i40iw_sc_cq *cq, u32 *compl_err)
{
	struct i40iw_qp_uk *qp;
	struct i40iw_cq_uk *cq_uk = &cq->cq_uk;
	struct i40iw_puda_completion_info info;
	enum i40iw_status_code ret = 0;
	struct i40iw_puda_buf *buf;
	struct i40iw_puda_rsrc *rsrc;
	void *sqwrid;
	u8 cq_type = cq->cq_type;
	unsigned long flags;

	if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
		rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? cq->vsi->ilq : cq->vsi->ieq;
	} else {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__);
		return I40IW_ERR_BAD_PTR;
	}
	memset(&info, 0, sizeof(info));
	ret = i40iw_puda_poll_info(cq, &info);
	*compl_err = info.compl_error;
	if (ret == I40IW_ERR_QUEUE_EMPTY)
		return ret;
	if (ret)
		goto done;

	qp = info.qp;
	if (!qp || !rsrc) {
		ret = I40IW_ERR_BAD_PTR;
		goto done;
	}

	if (qp->qp_id != rsrc->qp_id) {
		ret = I40IW_ERR_BAD_PTR;
		goto done;
	}

	if (info.q_type == I40IW_CQE_QTYPE_RQ) {
		buf = (struct i40iw_puda_buf *)(uintptr_t)qp->rq_wrid_array[info.wqe_idx];
		/* Get all the tcpip information in the buf header */
		ret = i40iw_puda_get_tcpip_info(&info, buf);
		if (ret) {
			rsrc->stats_rcvd_pkt_err++;
			if (cq_type == I40IW_CQ_TYPE_ILQ) {
				i40iw_ilq_putback_rcvbuf(&rsrc->qp,
							 info.wqe_idx);
			} else {
				i40iw_puda_ret_bufpool(rsrc, buf);
				i40iw_puda_replenish_rq(rsrc, false);
			}
			goto done;
		}

		rsrc->stats_pkt_rcvd++;
		rsrc->compl_rxwqe_idx = info.wqe_idx;
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
		rsrc->receive(rsrc->vsi, buf);
		if (cq_type == I40IW_CQ_TYPE_ILQ)
			i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
		else
			i40iw_puda_replenish_rq(rsrc, false);
	} else {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
		sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
		I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
		rsrc->xmit_complete(rsrc->vsi, sqwrid);
		spin_lock_irqsave(&rsrc->bufpool_lock, flags);
		rsrc->tx_wqe_avail_cnt++;
		spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
		if (!list_empty(&rsrc->vsi->ilq->txpend))
			i40iw_puda_send_buf(rsrc->vsi->ilq, NULL);
	}

done:
	I40IW_RING_MOVE_HEAD(cq_uk->cq_ring, ret);
	if (I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring) == 0)
		cq_uk->polarity = !cq_uk->polarity;
	/* update cq tail in cq shadow memory also */
	I40IW_RING_MOVE_TAIL(cq_uk->cq_ring);
	set_64bit_val(cq_uk->shadow_area, 0,
		      I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring));
	return 0;
}

/**
 * i40iw_puda_send - complete send wqe for transmit
 * @qp: puda qp for send
 * @info: buffer information for transmit
 */
enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
				       struct i40iw_puda_send_info *info)
{
	u64 *wqe;
	u32 iplen, l4len;
	u64 header[2];
	u32 wqe_idx;
	u8 iipt;

	/* number of 32 bits DWORDS in header */
	l4len = info->tcplen >> 2;
	if (info->ipv4) {
		iipt = 3;
		iplen = 5;
	} else {
		iipt = 1;
		iplen = 10;
	}

	wqe = i40iw_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
	/* Third line of WQE descriptor */
	/* maclen is in words */
	header[0] = LS_64((info->maclen >> 1), I40IW_UDA_QPSQ_MACLEN) |
		    LS_64(iplen, I40IW_UDA_QPSQ_IPLEN) | LS_64(1, I40IW_UDA_QPSQ_L4T) |
		    LS_64(iipt, I40IW_UDA_QPSQ_IIPT) |
		    LS_64(l4len, I40IW_UDA_QPSQ_L4LEN);
	/* Fourth line of WQE descriptor */
	header[1] = LS_64(I40IW_OP_TYPE_SEND, I40IW_UDA_QPSQ_OPCODE) |
		    LS_64(1, I40IW_UDA_QPSQ_SIGCOMPL) |
		    LS_64(info->doloopback, I40IW_UDA_QPSQ_DOLOOPBACK) |
		    LS_64(qp->qp_uk.swqe_polarity, I40IW_UDA_QPSQ_VALID);

	set_64bit_val(wqe, 0, info->paddr);
	set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
	set_64bit_val(wqe, 16, header[0]);
	set_64bit_val(wqe, 24, header[1]);

	i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
	i40iw_qp_post_wr(&qp->qp_uk);
	return 0;
}

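/*
 * Editorial note: a PUDA send WQE is four 64-bit words. Word 0 is the
 * buffer's physical address, word 8 the fragment length, and
 * header[0]/header[1] (words 16 and 24) describe the packet: MAC header
 * length in words, IP header length and type (iipt/iplen distinguish
 * IPv4 from IPv6), TCP header length in 32-bit words, the opcode, the
 * loopback flag, and the valid bit taken from the current SQ polarity.
 */
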
/**
 * i40iw_puda_send_buf - transmit puda buffer
 * @rsrc: resource to use for buffer
 * @buf: puda buffer to transmit
 */
void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, struct i40iw_puda_buf *buf)
{
	struct i40iw_puda_send_info info;
	enum i40iw_status_code ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
	/* if no wqe available or not from a completion and we have
	 * pending buffers, we must queue new buffer
	 */
	if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
		list_add_tail(&buf->list, &rsrc->txpend);
		spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
		rsrc->stats_sent_pkt_q++;
		if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
			i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
				    "%s: adding to txpend\n", __func__);
		return;
	}
	rsrc->tx_wqe_avail_cnt--;
	/* if we are coming from a completion and have pending buffers
	 * then Get one from pending list
	 */
	if (!buf) {
		buf = i40iw_puda_get_listbuf(&rsrc->txpend);
		if (!buf)
			goto done;
	}

	info.scratch = (void *)buf;
	info.paddr = buf->mem.pa;
	info.len = buf->totallen;
	info.tcplen = buf->tcphlen;
	info.maclen = buf->maclen;
	info.ipv4 = buf->ipv4;
	info.doloopback = (rsrc->type == I40IW_PUDA_RSRC_TYPE_IEQ);

	ret = i40iw_puda_send(&rsrc->qp, &info);
	if (ret) {
		rsrc->tx_wqe_avail_cnt++;
		rsrc->stats_sent_pkt_q++;
		list_add(&buf->list, &rsrc->txpend);
		if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
			i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
				    "%s: adding to puda_send\n", __func__);
	} else {
		rsrc->stats_pkt_sent++;
	}
done:
	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
}

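/*
 * Editorial note on transmit flow control: tx_wqe_avail_cnt is
 * decremented per posted send and incremented from the SQ completion
 * path. When no WQE is free, or when a new buffer would overtake older
 * pending ones, the buffer is parked on txpend; completions then drain
 * txpend by calling i40iw_puda_send_buf() with a NULL buf.
 */
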
/**
 * i40iw_puda_qp_setctx - during init, set qp's context
 * @rsrc: qp's resource
 */
static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
{
	struct i40iw_sc_qp *qp = &rsrc->qp;
	u64 *qp_ctx = qp->hw_host_ctx;

	set_64bit_val(qp_ctx, 8, qp->sq_pa);
	set_64bit_val(qp_ctx, 16, qp->rq_pa);

	set_64bit_val(qp_ctx, 24,
		      LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
		      LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE));

	set_64bit_val(qp_ctx, 48, LS_64(1514, I40IWQPC_SNDMSS));
	set_64bit_val(qp_ctx, 56, 0);
	set_64bit_val(qp_ctx, 64, 1);

	set_64bit_val(qp_ctx, 136,
		      LS_64(rsrc->cq_id, I40IWQPC_TXCQNUM) |
		      LS_64(rsrc->cq_id, I40IWQPC_RXCQNUM));

	set_64bit_val(qp_ctx, 160, LS_64(1, I40IWQPC_PRIVEN));

	set_64bit_val(qp_ctx, 168,
		      LS_64((uintptr_t)qp, I40IWQPC_QPCOMPCTX));

	set_64bit_val(qp_ctx, 176,
		      LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
		      LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
		      LS_64(qp->qs_handle, I40IWQPC_QSHANDLE));

	i40iw_debug_buf(rsrc->dev, I40IW_DEBUG_PUDA, "PUDA QP CONTEXT",
			qp_ctx, I40IW_QP_CTX_SIZE);
}

/**
 * i40iw_puda_qp_wqe - setup wqe for qp create
 * @dev: iwarp device
 * @qp: qp to setup
 */
static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;
	struct i40iw_ccq_cqe_info compl_info;
	enum i40iw_status_code status = 0;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, 40, qp->shadow_area_pa);
	header = qp->qp_uk.qp_id |
		 LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64(I40IW_QP_TYPE_UDA, I40IW_CQPSQ_QP_QPTYPE) |
		 LS_64(1, I40IW_CQPSQ_QP_CQNUMVALID) |
		 LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	set_64bit_val(wqe, 24, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
	i40iw_sc_cqp_post_sq(cqp);
	status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
						    I40IW_CQP_OP_CREATE_QP,
						    &compl_info);
	return status;
}

/**
 * i40iw_puda_qp_create - create qp for resource
 * @rsrc: resource to use for buffer
 */
static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
{
	struct i40iw_sc_qp *qp = &rsrc->qp;
	struct i40iw_qp_uk *ukqp = &qp->qp_uk;
	enum i40iw_status_code ret = 0;
	u32 sq_size, rq_size, t_size;
	struct i40iw_dma_mem *mem;

	sq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE;
	rq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE;
	t_size = (sq_size + rq_size + (I40IW_SHADOW_AREA_SIZE << 3) +
		  I40IW_QP_CTX_SIZE);
	/* Get page aligned memory */
	ret =
	    i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size,
				   I40IW_HW_PAGE_SIZE);
	if (ret) {
		i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s: error dma mem\n", __func__);
		return ret;
	}

	mem = &rsrc->qpmem;
	memset(mem->va, 0, t_size);
	qp->hw_sq_size = i40iw_get_encoded_wqe_size(rsrc->sq_size, false);
	qp->hw_rq_size = i40iw_get_encoded_wqe_size(rsrc->rq_size, false);
	qp->pd = &rsrc->sc_pd;
	qp->qp_type = I40IW_QP_TYPE_UDA;
	qp->dev = rsrc->dev;
	qp->back_qp = (void *)rsrc;
	qp->sq_pa = mem->pa;
	qp->rq_pa = qp->sq_pa + sq_size;
	qp->vsi = rsrc->vsi;
	ukqp->sq_base = mem->va;
	ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
	ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
	qp->shadow_area_pa = qp->rq_pa + rq_size;
	qp->hw_host_ctx = ukqp->shadow_area + I40IW_SHADOW_AREA_SIZE;
	qp->hw_host_ctx_pa =
		qp->shadow_area_pa + (I40IW_SHADOW_AREA_SIZE << 3);
	ukqp->qp_id = rsrc->qp_id;
	ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
	ukqp->rq_wrid_array = rsrc->rq_wrid_array;

	ukqp->sq_size = rsrc->sq_size;
	ukqp->rq_size = rsrc->rq_size;

	I40IW_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
	I40IW_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
	I40IW_RING_INIT(ukqp->rq_ring, ukqp->rq_size);

	if (qp->pd->dev->is_pf)
		ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
						      I40E_PFPE_WQEALLOC);
	else
		ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
						      I40E_VFPE_WQEALLOC1);

	qp->user_pri = 0;
	i40iw_qp_add_qos(qp);
	i40iw_puda_qp_setctx(rsrc);
	if (rsrc->ceq_valid)
		ret = i40iw_cqp_qp_create_cmd(rsrc->dev, qp);
	else
		ret = i40iw_puda_qp_wqe(rsrc->dev, qp);
	if (ret)
		i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
	return ret;
}

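/*
 * Editorial note: i40iw_puda_qp_create() carves one page-aligned DMA
 * allocation into four back-to-back regions: SQ WQEs, RQ WQEs, the
 * shadow area (I40IW_SHADOW_AREA_SIZE 64-bit words, hence the << 3 when
 * computing physical offsets), and finally the hardware host context.
 * Virtual and physical pointers are derived from the same base so the
 * regions stay in lockstep.
 */
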
/**
 * i40iw_puda_cq_wqe - setup wqe for cq create
 * @dev: iwarp device
 * @cq: cq to setup
 */
static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	struct i40iw_ccq_cqe_info compl_info;
	enum i40iw_status_code status = 0;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
	set_64bit_val(wqe, 16,
		      LS_64(cq->shadow_read_threshold,
			    I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
	set_64bit_val(wqe, 32, cq->cq_pa);

	set_64bit_val(wqe, 40, cq->shadow_area_pa);

	header = cq->cq_uk.cq_id |
		 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
		 LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	set_64bit_val(wqe, 24, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	i40iw_sc_cqp_post_sq(dev->cqp);
	status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
						    I40IW_CQP_OP_CREATE_CQ,
						    &compl_info);
	return status;
}

/**
 * i40iw_puda_cq_create - create cq for resource
 * @rsrc: resource for which cq to create
 */
static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
{
	struct i40iw_sc_dev *dev = rsrc->dev;
	struct i40iw_sc_cq *cq = &rsrc->cq;
	enum i40iw_status_code ret = 0;
	u32 tsize, cqsize;
	struct i40iw_dma_mem *mem;
	struct i40iw_cq_init_info info;
	struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info;

	cq->vsi = rsrc->vsi;
	cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
	tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
	ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
				     I40IW_CQ0_ALIGNMENT_MASK);
	if (ret)
		return ret;

	mem = &rsrc->cqmem;
	memset(&info, 0, sizeof(info));
	info.dev = dev;
	info.type = (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) ?
			 I40IW_CQ_TYPE_ILQ : I40IW_CQ_TYPE_IEQ;
	info.shadow_read_threshold = rsrc->cq_size >> 2;
	info.ceq_id_valid = true;
	info.cq_base_pa = mem->pa;
	info.shadow_area_pa = mem->pa + cqsize;
	init_info->cq_base = mem->va;
	init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize);
	init_info->cq_size = rsrc->cq_size;
	init_info->cq_id = rsrc->cq_id;
	info.ceqe_mask = true;
	ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
	if (ret)
		goto error;
	if (rsrc->ceq_valid)
		ret = i40iw_cqp_cq_create_cmd(dev, cq);
	else
		ret = i40iw_puda_cq_wqe(dev, cq);
error:
	if (ret)
		i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
	return ret;
}

/**
 * i40iw_puda_free_qp - free qp for resource
 * @rsrc: resource for which qp to free
 */
static void i40iw_puda_free_qp(struct i40iw_puda_rsrc *rsrc)
{
	enum i40iw_status_code ret;
	struct i40iw_ccq_cqe_info compl_info;
	struct i40iw_sc_dev *dev = rsrc->dev;

	if (rsrc->ceq_valid) {
		i40iw_cqp_qp_destroy_cmd(dev, &rsrc->qp);
		return;
	}

	ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
					      0, false, true, true);
	if (ret)
		i40iw_debug(dev, I40IW_DEBUG_PUDA,
			    "%s error puda qp destroy wqe\n",
			    __func__);

	if (!ret) {
		ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
							 I40IW_CQP_OP_DESTROY_QP,
							 &compl_info);
		if (ret)
			i40iw_debug(dev, I40IW_DEBUG_PUDA,
				    "%s error puda qp destroy failed\n",
				    __func__);
	}
}

/**
 * i40iw_puda_free_cq - free cq for resource
 * @rsrc: resource for which cq to free
 */
static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc)
{
	enum i40iw_status_code ret;
	struct i40iw_ccq_cqe_info compl_info;
	struct i40iw_sc_dev *dev = rsrc->dev;

	if (rsrc->ceq_valid) {
		i40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq);
		return;
	}

	ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
	if (ret)
		i40iw_debug(dev, I40IW_DEBUG_PUDA,
			    "%s error ieq cq destroy\n",
			    __func__);

	if (!ret) {
		ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
							 I40IW_CQP_OP_DESTROY_CQ,
							 &compl_info);
		if (ret)
			i40iw_debug(dev, I40IW_DEBUG_PUDA,
				    "%s error ieq qp destroy done\n",
				    __func__);
	}
}

/**
 * i40iw_puda_dele_resources - delete all resources during close
 * @vsi: pointer to vsi structure
 * @type: type of resource to dele
 * @reset: true if reset chip
 */
void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
			       enum puda_resource_type type,
			       bool reset)
{
	struct i40iw_sc_dev *dev = vsi->dev;
	struct i40iw_puda_rsrc *rsrc;
	struct i40iw_puda_buf *buf = NULL;
	struct i40iw_puda_buf *nextbuf = NULL;
	struct i40iw_virt_mem *vmem;

	switch (type) {
	case I40IW_PUDA_RSRC_TYPE_ILQ:
		rsrc = vsi->ilq;
		vmem = &vsi->ilq_mem;
		break;
	case I40IW_PUDA_RSRC_TYPE_IEQ:
		rsrc = vsi->ieq;
		vmem = &vsi->ieq_mem;
		break;
	default:
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",
			    __func__, type);
		return;
	}

	switch (rsrc->completion) {
	case PUDA_HASH_CRC_COMPLETE:
		i40iw_free_hash_desc(rsrc->hash_desc);
		/* fall through */
	case PUDA_QP_CREATED:
		if (!reset)
			i40iw_puda_free_qp(rsrc);

		i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
		/* fall through */
	case PUDA_CQ_CREATED:
		if (!reset)
			i40iw_puda_free_cq(rsrc);

		i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
		break;
	default:
		i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__);
		break;
	}
	/* Free all allocated puda buffers for both tx and rx */
	buf = rsrc->alloclist;
	while (buf) {
		nextbuf = buf->next;
		i40iw_puda_dele_buf(dev, buf);
		buf = nextbuf;
		rsrc->alloc_buf_count--;
	}
	i40iw_free_virt_mem(dev->hw, vmem);
}

/**
 * i40iw_puda_allocbufs - allocate buffers for resource
 * @rsrc: resource for buffer allocation
 * @count: number of buffers to create
 */
static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
						   u32 count)
{
	u32 i;
	struct i40iw_puda_buf *buf;
	struct i40iw_puda_buf *nextbuf;

	for (i = 0; i < count; i++) {
		buf = i40iw_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
		if (!buf) {
			rsrc->stats_buf_alloc_fail++;
			return I40IW_ERR_NO_MEMORY;
		}
		i40iw_puda_ret_bufpool(rsrc, buf);
		rsrc->alloc_buf_count++;
		if (!rsrc->alloclist) {
			rsrc->alloclist = buf;
		} else {
			nextbuf = rsrc->alloclist;
			rsrc->alloclist = buf;
			buf->next = nextbuf;
		}
	}
	rsrc->avail_buf_count = rsrc->alloc_buf_count;
	return 0;
}

/**
 * i40iw_puda_create_rsrc - create resource (ilq or ieq)
 * @vsi: pointer to vsi structure
 * @info: resource information
 */
enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
					      struct i40iw_puda_rsrc_info *info)
{
	struct i40iw_sc_dev *dev = vsi->dev;
	enum i40iw_status_code ret = 0;
	struct i40iw_puda_rsrc *rsrc;
	u32 pudasize;
	u32 sqwridsize, rqwridsize;
	struct i40iw_virt_mem *vmem;

	info->count = 1;
	pudasize = sizeof(struct i40iw_puda_rsrc);
	sqwridsize = info->sq_size * sizeof(struct i40iw_sq_uk_wr_trk_info);
	rqwridsize = info->rq_size * 8;
	switch (info->type) {
	case I40IW_PUDA_RSRC_TYPE_ILQ:
		vmem = &vsi->ilq_mem;
		break;
	case I40IW_PUDA_RSRC_TYPE_IEQ:
		vmem = &vsi->ieq_mem;
		break;
	default:
		return I40IW_NOT_SUPPORTED;
	}
	ret =
	    i40iw_allocate_virt_mem(dev->hw, vmem,
				    pudasize + sqwridsize + rqwridsize);
	if (ret)
		return ret;
	rsrc = (struct i40iw_puda_rsrc *)vmem->va;
	spin_lock_init(&rsrc->bufpool_lock);
	if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {
		vsi->ilq = (struct i40iw_puda_rsrc *)vmem->va;
		vsi->ilq_count = info->count;
		rsrc->receive = info->receive;
		rsrc->xmit_complete = info->xmit_complete;
	} else {
		vmem = &vsi->ieq_mem;
		vsi->ieq_count = info->count;
		vsi->ieq = (struct i40iw_puda_rsrc *)vmem->va;
		rsrc->receive = i40iw_ieq_receive;
		rsrc->xmit_complete = i40iw_ieq_tx_compl;
	}

	rsrc->ceq_valid = info->ceq_valid;
	rsrc->type = info->type;
	rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
	rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
	/* Initialize all ieq lists */
	INIT_LIST_HEAD(&rsrc->bufpool);
	INIT_LIST_HEAD(&rsrc->txpend);

	rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
	dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id, -1);
	rsrc->qp_id = info->qp_id;
	rsrc->cq_id = info->cq_id;
	rsrc->sq_size = info->sq_size;
	rsrc->rq_size = info->rq_size;
	rsrc->cq_size = info->rq_size + info->sq_size;
	rsrc->buf_size = info->buf_size;
	rsrc->dev = dev;
	rsrc->vsi = vsi;

	ret = i40iw_puda_cq_create(rsrc);
	if (!ret) {
		rsrc->completion = PUDA_CQ_CREATED;
		ret = i40iw_puda_qp_create(rsrc);
	}
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n", __func__);
		goto error;
	}
	rsrc->completion = PUDA_QP_CREATED;

	ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error alloc_buf\n", __func__);
		goto error;
	}

	rsrc->rxq_invalid_cnt = info->rq_size;
	ret = i40iw_puda_replenish_rq(rsrc, true);
	if (ret)
		goto error;

	if (info->type == I40IW_PUDA_RSRC_TYPE_IEQ) {
		if (!i40iw_init_hash_desc(&rsrc->hash_desc)) {
			rsrc->check_crc = true;
			rsrc->completion = PUDA_HASH_CRC_COMPLETE;
		}
	}

	dev->ccq_ops->ccq_arm(&rsrc->cq);
	return ret;

error:
	i40iw_puda_dele_resources(vsi, info->type, false);
	return ret;
}

/**
 * i40iw_ilq_putback_rcvbuf - ilq buffer to put back on rq
 * @qp: ilq's qp resource
 * @wqe_idx: wqe index of completed rcvbuf
 */
static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx)
{
	u64 *wqe;
	u64 offset24;

	wqe = qp->qp_uk.rq_base[wqe_idx].elem;
	get_64bit_val(wqe, 24, &offset24);
	offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
	set_64bit_val(wqe, 24, offset24);
}

/**
 * i40iw_ieq_get_fpdu_length - given length return fpdu length
 * @length: length of fpdu
 */
static u16 i40iw_ieq_get_fpdu_length(u16 length)
{
	u16 fpdu_len;

	fpdu_len = length + I40IW_IEQ_MPA_FRAMING;
	fpdu_len = (fpdu_len + 3) & 0xfffffffc;
	return fpdu_len;
}

/**
 * i40iw_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
 * @buf: rcv buffer with partial
 * @txbuf: tx buffer for sending back
 * @buf_offset: rcv buffer offset to copy from
 * @txbuf_offset: at offset in tx buf to copy
 * @length: length of data to copy
 */
static void i40iw_ieq_copy_to_txbuf(struct i40iw_puda_buf *buf,
				    struct i40iw_puda_buf *txbuf,
				    u16 buf_offset, u32 txbuf_offset,
				    u32 length)
{
	void *mem1 = (u8 *)buf->mem.va + buf_offset;
	void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;

	memcpy(mem2, mem1, length);
}

/**
 * i40iw_ieq_setup_tx_buf - setup tx buffer for partial handling
 * @buf: receive buffer with partial
 * @txbuf: buffer to prepare
 */
static void i40iw_ieq_setup_tx_buf(struct i40iw_puda_buf *buf,
				   struct i40iw_puda_buf *txbuf)
{
	txbuf->maclen = buf->maclen;
	txbuf->tcphlen = buf->tcphlen;
	txbuf->ipv4 = buf->ipv4;
	txbuf->hdrlen = buf->hdrlen;
	i40iw_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
}

/**
 * i40iw_ieq_check_first_buf - check if rcv buffer's seq is in range
 * @buf: receive exception buffer
 * @fps: first partial sequence number
 */
static void i40iw_ieq_check_first_buf(struct i40iw_puda_buf *buf, u32 fps)
{
	u32 offset;

	if (buf->seqnum < fps) {
		offset = fps - buf->seqnum;
		if (offset > buf->datalen)
			return;
		buf->data += offset;
		buf->datalen -= (u16)offset;
		buf->seqnum = fps;
	}
}

/**
 * i40iw_ieq_compl_pfpdu - write txbuf with full fpdu
 * @ieq: ieq resource
 * @rxlist: ieq's received buffer list
 * @pbufl: temporary list for buffers for fpdu
 * @txbuf: tx buffer for fpdu
 * @fpdu_len: total length of fpdu
 */
static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
				  struct list_head *rxlist,
				  struct list_head *pbufl,
				  struct i40iw_puda_buf *txbuf,
				  u16 fpdu_len)
{
	struct i40iw_puda_buf *buf;
	u32 nextseqnum;
	u16 txoffset, bufoffset;

	buf = i40iw_puda_get_listbuf(pbufl);
	if (!buf)
		return;
	nextseqnum = buf->seqnum + fpdu_len;
	txbuf->totallen = buf->hdrlen + fpdu_len;
	txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
	i40iw_ieq_setup_tx_buf(buf, txbuf);

	txoffset = buf->hdrlen;
	bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);

	do {
		if (buf->datalen >= fpdu_len) {
			/* copied full fpdu */
			i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, fpdu_len);
			buf->datalen -= fpdu_len;
			buf->data += fpdu_len;
			buf->seqnum = nextseqnum;
			break;
		}
		/* copy partial fpdu */
		i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, buf->datalen);
		txoffset += buf->datalen;
		fpdu_len -= buf->datalen;
		i40iw_puda_ret_bufpool(ieq, buf);
		buf = i40iw_puda_get_listbuf(pbufl);
		if (!buf)
			return;
		bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
	} while (1);

	/* last buffer on the list*/
	if (buf->datalen)
		list_add(&buf->list, rxlist);
	else
		i40iw_puda_ret_bufpool(ieq, buf);
}

/**
 * i40iw_ieq_create_pbufl - create buffer list for single fpdu
 * @pfpdu: partial management per user qp
 * @rxlist: resource list for receive ieq buffers
 * @pbufl: temp. list for buffers for fpdu
 * @buf: first receive buffer
 * @fpdu_len: total length of fpdu
 */
static enum i40iw_status_code i40iw_ieq_create_pbufl(
				struct i40iw_pfpdu *pfpdu,
				struct list_head *rxlist,
				struct list_head *pbufl,
				struct i40iw_puda_buf *buf,
				u16 fpdu_len)
{
	enum i40iw_status_code status = 0;
	struct i40iw_puda_buf *nextbuf;
	u32 nextseqnum;
	u16 plen = fpdu_len - buf->datalen;
	bool done = false;

	nextseqnum = buf->seqnum + buf->datalen;
	do {
		nextbuf = i40iw_puda_get_listbuf(rxlist);
		if (!nextbuf) {
			status = I40IW_ERR_list_empty;
			break;
		}
		list_add_tail(&nextbuf->list, pbufl);
		if (nextbuf->seqnum != nextseqnum) {
			pfpdu->bad_seq_num++;
			status = I40IW_ERR_SEQ_NUM;
			break;
		}
		if (nextbuf->datalen >= plen) {
			done = true;
		} else {
			plen -= nextbuf->datalen;
			nextseqnum = nextbuf->seqnum + nextbuf->datalen;
		}
	} while (!done);

	return status;
}

/**
 * i40iw_ieq_handle_partial - process partial fpdu buffer
 * @ieq: ieq resource
 * @pfpdu: partial management per user qp
 * @buf: receive buffer
 * @fpdu_len: fpdu len in the buffer
 */
static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *ieq,
						       struct i40iw_pfpdu *pfpdu,
						       struct i40iw_puda_buf *buf,
						       u16 fpdu_len)
{
	enum i40iw_status_code status = 0;
	u8 *crcptr;
	u32 mpacrc;
	u32 seqnum = buf->seqnum;
	struct list_head pbufl;	/* partial buffer list */
	struct i40iw_puda_buf *txbuf = NULL;
	struct list_head *rxlist = &pfpdu->rxlist;

	INIT_LIST_HEAD(&pbufl);
	list_add(&buf->list, &pbufl);

	status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
	if (status)
		goto error;

	txbuf = i40iw_puda_get_bufpool(ieq);
	if (!txbuf) {
		pfpdu->no_tx_bufs++;
		status = I40IW_ERR_NO_TXBUFS;
		goto error;
	}

	i40iw_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
	i40iw_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
	crcptr = txbuf->data + fpdu_len - 4;
	mpacrc = *(u32 *)crcptr;
	if (ieq->check_crc) {
		status = i40iw_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
						(fpdu_len - 4), mpacrc);
		if (status) {
			i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
				    "%s: error bad crc\n", __func__);
			goto error;
		}
	}

	i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "IEQ TX BUFFER",
			txbuf->mem.va, txbuf->totallen);
	i40iw_puda_send_buf(ieq, txbuf);
	pfpdu->rcv_nxt = seqnum + fpdu_len;
	return status;

error:
	while (!list_empty(&pbufl)) {
		buf = (struct i40iw_puda_buf *)(pbufl.prev);
		list_del(&buf->list);
		list_add(&buf->list, rxlist);
	}
	if (txbuf)
		i40iw_puda_ret_bufpool(ieq, txbuf);
	return status;
}

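/*
 * Editorial note on the partial FPDU path: the current buffer is seeded
 * onto a temporary list, i40iw_ieq_create_pbufl() pulls in-order
 * successors from the rxlist until a full FPDU's worth of bytes is
 * gathered, and i40iw_ieq_compl_pfpdu() concatenates them into a single
 * tx buffer, which is then CRC-checked and retransmitted to the QP. On
 * any failure the gathered buffers are pushed back onto rxlist so a
 * later completion can retry.
 */
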
/**
 * i40iw_ieq_process_buf - process buffer rcvd for ieq
 * @ieq: ieq resource
 * @pfpdu: partial management per user qp
 * @buf: receive buffer
 */
static enum i40iw_status_code i40iw_ieq_process_buf(struct i40iw_puda_rsrc *ieq,
						    struct i40iw_pfpdu *pfpdu,
						    struct i40iw_puda_buf *buf)
{
	u16 fpdu_len = 0;
	u16 datalen = buf->datalen;
	u8 *datap = buf->data;
	u8 *crcptr;
	u16 ioffset = 0;
	u32 mpacrc;
	u32 seqnum = buf->seqnum;
	u16 length = 0;
	u16 full = 0;
	bool partial = false;
	struct i40iw_puda_buf *txbuf;
	struct list_head *rxlist = &pfpdu->rxlist;
	enum i40iw_status_code ret = 0;
	enum i40iw_status_code status = 0;

	ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
	while (datalen) {
		fpdu_len = i40iw_ieq_get_fpdu_length(ntohs(*(__be16 *)datap));
		if (fpdu_len > pfpdu->max_fpdu_data) {
			i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
				    "%s: error bad fpdu_len\n", __func__);
			status = I40IW_ERR_MPA_CRC;
			list_add(&buf->list, rxlist);
			return status;
		}

		if (datalen < fpdu_len) {
			partial = true;
			break;
		}
		crcptr = datap + fpdu_len - 4;
		mpacrc = *(u32 *)crcptr;
		if (ieq->check_crc)
			ret = i40iw_ieq_check_mpacrc(ieq->hash_desc,
						     datap, fpdu_len - 4, mpacrc);
		if (ret) {
			status = I40IW_ERR_MPA_CRC;
			list_add(&buf->list, rxlist);
			return status;
		}
		full++;
		pfpdu->fpdu_processed++;
		datap += fpdu_len;
		length += fpdu_len;
		datalen -= fpdu_len;
	}
	if (full) {
		/* copy full pdu's in the txbuf and send them out */
		txbuf = i40iw_puda_get_bufpool(ieq);
		if (!txbuf) {
			pfpdu->no_tx_bufs++;
			status = I40IW_ERR_NO_TXBUFS;
			list_add(&buf->list, rxlist);
			return status;
		}
		/* modify txbuf's buffer header */
		i40iw_ieq_setup_tx_buf(buf, txbuf);
		/* copy full fpdu's to new buffer */
		i40iw_ieq_copy_to_txbuf(buf, txbuf, ioffset, buf->hdrlen,
					length);
		txbuf->totallen = buf->hdrlen + length;

		i40iw_ieq_update_tcpip_info(txbuf, length, buf->seqnum);
		i40iw_puda_send_buf(ieq, txbuf);

		if (!datalen) {
			pfpdu->rcv_nxt = buf->seqnum + length;
			i40iw_puda_ret_bufpool(ieq, buf);
			return status;
		}
		buf->data = datap;
		buf->seqnum = seqnum + length;
		buf->datalen = datalen;
		pfpdu->rcv_nxt = buf->seqnum;
	}
	if (partial)
		status = i40iw_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);

	return status;
}

/**
 * i40iw_ieq_process_fpdus - process fpdu's buffers on its list
 * @qp: qp for which partial fpdus
 * @ieq: ieq resource
 */
static void i40iw_ieq_process_fpdus(struct i40iw_sc_qp *qp,
				    struct i40iw_puda_rsrc *ieq)
{
	struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
	struct list_head *rxlist = &pfpdu->rxlist;
	struct i40iw_puda_buf *buf;
	enum i40iw_status_code status;

	do {
		if (list_empty(rxlist))
			break;
		buf = i40iw_puda_get_listbuf(rxlist);
		if (!buf) {
			i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
				    "%s: error no buf\n", __func__);
			break;
		}
		if (buf->seqnum != pfpdu->rcv_nxt) {
			/* This could be out of order or missing packet */
			pfpdu->out_of_order++;
			list_add(&buf->list, rxlist);
			break;
		}
		/* keep processing buffers from the head of the list */
		status = i40iw_ieq_process_buf(ieq, pfpdu, buf);
		if (status == I40IW_ERR_MPA_CRC) {
			pfpdu->mpa_crc_err = true;
			while (!list_empty(rxlist)) {
				buf = i40iw_puda_get_listbuf(rxlist);
				i40iw_puda_ret_bufpool(ieq, buf);
				pfpdu->crc_err++;
			}
			/* create CQP for AE */
			i40iw_ieq_mpa_crc_ae(ieq->dev, qp);
		}
	} while (!status);
}

/**
 * i40iw_ieq_handle_exception - handle qp's exception
 * @ieq: ieq resource
 * @qp: qp receiving exception
 * @buf: receive buffer
 */
static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
				       struct i40iw_sc_qp *qp,
				       struct i40iw_puda_buf *buf)
{
	struct i40iw_puda_buf *tmpbuf = NULL;
	struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
	u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
	u32 rcv_wnd = hw_host_ctx[23];
	/* first partial seq # in q2 */
	u32 fps = qp->q2_buf[16];
	struct list_head *rxlist = &pfpdu->rxlist;
	struct list_head *plist;

	pfpdu->total_ieq_bufs++;

	if (pfpdu->mpa_crc_err) {
		pfpdu->crc_err++;
		goto error;
	}
	if (pfpdu->mode && (fps != pfpdu->fps)) {
		/* clean up qp as it is new partial sequence */
		i40iw_ieq_cleanup_qp(ieq, qp);
		i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
			    "%s: restarting new partial\n", __func__);
		pfpdu->mode = false;
	}

	if (!pfpdu->mode) {
		i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "Q2 BUFFER", (u64 *)qp->q2_buf, 128);
		/* First_Partial_Sequence_Number check */
		pfpdu->rcv_nxt = fps;
		pfpdu->fps = fps;
		pfpdu->mode = true;
		pfpdu->max_fpdu_data = ieq->vsi->mss;
		pfpdu->pmode_count++;
		INIT_LIST_HEAD(rxlist);
		i40iw_ieq_check_first_buf(buf, fps);
	}

	if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
		pfpdu->bad_seq_num++;
		goto error;
	}

	if (!list_empty(rxlist)) {
		tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
		plist = &tmpbuf->list;
		while ((struct list_head *)tmpbuf != rxlist) {
			if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
				break;
			plist = &tmpbuf->list;
			tmpbuf = (struct i40iw_puda_buf *)plist->next;
		}
		/* Insert buf before tmpbuf */
		list_add_tail(&buf->list, &tmpbuf->list);
	} else {
		list_add_tail(&buf->list, rxlist);
	}
	i40iw_ieq_process_fpdus(qp, ieq);
	return;
error:
	i40iw_puda_ret_bufpool(ieq, buf);
}

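/*
 * Editorial note: exception buffers can complete out of order, so
 * i40iw_ieq_handle_exception() inserts each one into rxlist sorted by
 * TCP sequence number (the signed (int) subtraction keeps the compare
 * correct across sequence-number wraparound) and only then lets
 * i40iw_ieq_process_fpdus() consume the list from the head while
 * buffers remain contiguous with rcv_nxt.
 */
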
/**
 * i40iw_ieq_receive - received exception buffer
 * @vsi: pointer to the vsi structure
 * @buf: exception buffer received
 */
static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
			      struct i40iw_puda_buf *buf)
{
	struct i40iw_puda_rsrc *ieq = vsi->ieq;
	struct i40iw_sc_qp *qp = NULL;
	u32 wqe_idx = ieq->compl_rxwqe_idx;

	qp = i40iw_ieq_get_qp(vsi->dev, buf);
	if (!qp) {
		ieq->stats_bad_qp_id++;
		i40iw_puda_ret_bufpool(ieq, buf);
	} else {
		i40iw_ieq_handle_exception(ieq, qp, buf);
	}
	/*
	 * ieq->rx_wqe_idx is used by i40iw_puda_replenish_rq()
	 * on which wqe_idx to start replenish rq
	 */
	if (!ieq->rxq_invalid_cnt)
		ieq->rx_wqe_idx = wqe_idx;
	ieq->rxq_invalid_cnt++;
}

/**
 * i40iw_ieq_tx_compl - put back after sending completed exception buffer
 * @vsi: pointer to the vsi structure
 * @sqwrid: pointer to puda buffer
 */
static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid)
{
	struct i40iw_puda_rsrc *ieq = vsi->ieq;
	struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid;

	i40iw_puda_ret_bufpool(ieq, buf);
	if (!list_empty(&ieq->txpend)) {
		buf = i40iw_puda_get_listbuf(&ieq->txpend);
		i40iw_puda_send_buf(ieq, buf);
	}
}

/**
 * i40iw_ieq_cleanup_qp - qp is being destroyed
 * @ieq: ieq resource
 * @qp: all pending fpdu buffers
 */
static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
{
	struct i40iw_puda_buf *buf;
	struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
	struct list_head *rxlist = &pfpdu->rxlist;

	if (!pfpdu->mode)
		return;
	while (!list_empty(rxlist)) {
		buf = i40iw_puda_get_listbuf(rxlist);
		i40iw_puda_ret_bufpool(ieq, buf);
	}
}