/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_roce.h"

#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)

struct qed_cb_ll2_info {
	int rx_cnt;
	u32 rx_size;
	u8 handle;

	/* Lock protecting LL2 buffer lists in sleepless context */
	spinlock_t lock;
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;
	void *cb_cookie;
};

struct qed_ll2_buffer {
	struct list_head list;
	void *data;
	dma_addr_t phys_addr;
};

static void qed_ll2b_complete_tx_packet(void *cxt,
					u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
				      b_last_fragment);

	dev_kfree_skb_any(skb);
}

static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
{
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
	if (!(*data)) {
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
		return -ENOMEM;
	}

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
		kfree((*data));
		return -ENOMEM;
	}

	return 0;
}

static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
{
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	kfree(buffer->data);
	list_del(&buffer->list);

	cdev->ll2->rx_cnt--;
	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

	return 0;
}

static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

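/* Rx completion for the buffer scheme above: a replacement buffer is
 * allocated first, and only if that succeeds is the completed buffer
 * unmapped, wrapped via build_skb() and handed to the registered rx_cb.
 * On any failure (runt frame, -ENOMEM) the original buffer is simply
 * reposted, so the Rx ring never shrinks; either way the descriptor is
 * returned to firmware through qed_ll2_post_rx_buffer().
 */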
void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_buffer *buffer = data->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	dma_addr_t new_phys_addr;
	struct sk_buff *skb;
	bool reuse = false;
	int rc = -EINVAL;
	u8 *new_data;

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)data->rx_buf_addr,
		   data->u.placement_offset,
		   data->length.packet_length,
		   data->parse_flags,
		   data->vlan, data->opaque_data_0, data->opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, data->length.packet_length, false);
	}

	/* Determine if data is valid */
	if (data->length.packet_length < ETH_HLEN)
		reuse = true;

	/* Allocate a replacement for buffer; reuse upon failure */
	if (!reuse)
		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
					  &new_phys_addr);

	/* If we need to reuse the buffer or got no replacement, repost it */
	if (rc)
		goto out_post;
	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);

	skb = build_skb(buffer->data, 0);
	if (!skb) {
		rc = -ENOMEM;
		goto out_post;
	}

	data->u.placement_offset += NET_SKB_PAD;
	skb_reserve(skb, data->u.placement_offset);
	skb_put(skb, data->length.packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial Ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
		if (data->vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       data->vlan);
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      data->opaque_data_0,
				      data->opaque_data_1);
	}

	/* Update buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

out_post:
	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);

	if (rc)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
						    bool b_lock,
						    bool b_only_active)
{
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
		return NULL;

	if (!p_hwfn->p_ll2_info)
		return NULL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	if (b_only_active) {
		if (b_lock)
			mutex_lock(&p_ll2_conn->mutex);
		if (p_ll2_conn->b_active)
			p_ret = p_ll2_conn;
		if (b_lock)
			mutex_unlock(&p_ll2_conn->mutex);
	} else {
		p_ret = p_ll2_conn;
	}

	return p_ret;
}

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   *p_hwfn,
							   u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}

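/* The _lock variant takes the per-connection mutex only around the
 * b_active check; callers that already serialize against acquire/release,
 * such as the flush helpers below (which run on connections being torn
 * down), use the _inactive variant and skip both the lock and the
 * b_active test.
 */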
static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	dma_addr_t tx_frag;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_tx = &p_ll2_conn->tx_queue;

	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			p_tx->cur_completing_packet = *p_pkt;
			p_tx->cur_completing_bd_idx = 1;
			b_last_frag =
				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
			tx_frag = p_pkt->bds_set[0].tx_frag;
			p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
						      p_ll2_conn->my_id,
						      p_pkt->cookie,
						      tx_frag,
						      b_last_frag,
						      b_last_packet);
		}
	}
}

static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {
		rc = -EBUSY;
		goto out;
	}

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			goto out;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			goto out;

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (num_bds < num_bds_in_packet) {
			DP_NOTICE(p_hwfn,
				  "Rest of BDs does not cover whole packet\n");
			goto out;
		}

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);

		p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
					   p_ll2_conn->my_id,
					   p_pkt->cookie,
					   p_pkt->bds_set[0].tx_frag,
					   b_last_frag, !num_bds);

		spin_lock_irqsave(&p_tx->lock, flags);
	}

	p_tx->b_completing_packet = false;
	rc = 0;
out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

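/* The consumer arithmetic above relies on u16 wraparound: casting both
 * indices to s16 before subtracting yields the correct BD count even when
 * the firmware consumer has wrapped past 0xffff. For example, with
 * p_tx->bds_idx == 0xfffe and a new firmware consumer of 0x0003:
 *
 *	num_bds = (s16)0x0003 - (s16)0xfffe = 3 - (-2) = 5
 */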
static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
}

static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
	data->length.packet_length =
	    le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
	data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
	data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
}

static int
qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_conn,
			      union core_rx_cqe_union *p_cqe,
			      unsigned long *p_lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_comp_rx_data data;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "[%d] LL2 Rx completion but active_descq is empty\n",
			  p_ll2_conn->input.conn_type);

		return -EIO;
	}
	list_del(&p_pkt->list_entry);

	if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
		qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
	else
		qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");

	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	data.connection_handle = p_ll2_conn->my_id;
	data.cookie = p_pkt->cookie;
	data.rx_buf_addr = p_pkt->rx_buf_addr;
	data.b_last_packet = b_last_cqe;

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
	p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);

	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}

static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe = (cq_new_idx == cq_old_idx);

		cqe =
		    (union core_rx_cqe_union *)
		    qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
			rc = -EINVAL;
			break;
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
							   cqe, &flags,
							   b_last_cqe);
			break;
		default:
			rc = -EIO;
		}
	}

	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_rx = &p_ll2_conn->rx_queue;

	while (!list_empty(&p_rx->active_descq)) {
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);

		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
			void *cookie = p_pkt->cookie;
			bool b_last;

			b_last = list_empty(&p_rx->active_descq);
			p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
						      p_ll2_conn->my_id,
						      cookie,
						      rx_buf_addr, b_last);
		}
	}
}

static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
{
	u8 bd_flags = 0;

	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);

	return bd_flags;
}

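/* Example of the flag translation above: a looped-back frame whose Rx CQE
 * parse_flags had TAG8021QEXIST set is re-sent with the VLAN-insertion BD
 * bit, so qed_ooo_submit_tx_buffers() below can replay it with its
 * original tag (the tag value itself travels separately in
 * p_buffer->vlan).
 */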
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	u16 packet_length = 0, parse_flags = 0, vlan = 0;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u32 num_ooo_add_to_peninsula = 0, cid;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	struct qed_ooo_buffer *p_buffer;
	struct ooo_opaque *iscsi_ooo;
	u8 placement_offset = 0;
	u8 cqe_type;

	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
	if (cq_new_idx == cq_old_idx)
		return 0;

	while (cq_new_idx != cq_old_idx) {
		struct core_rx_fast_path_cqe *p_cqe_fp;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
		cqe_type = cqe->rx_cqe_sp.type;

		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
			DP_NOTICE(p_hwfn,
				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
				  cqe_type);
			return -EINVAL;
		}
		p_cqe_fp = &cqe->rx_cqe_fp;

		placement_offset = p_cqe_fp->placement_offset;
		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
		vlan = le16_to_cpu(p_cqe_fp->vlan);
		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
					   iscsi_ooo);
		cid = le32_to_cpu(iscsi_ooo->cid);

		/* Process delete isle first */
		if (iscsi_ooo->drop_size)
			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
					     iscsi_ooo->drop_isle,
					     iscsi_ooo->drop_size);

		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
			continue;

		/* Now process create/add/join isles */
		if (list_empty(&p_rx->active_descq)) {
			DP_NOTICE(p_hwfn,
				  "LL2 OOO RX chain has no submitted buffers\n");
			return -EIO;
		}

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
			if (!p_pkt) {
				DP_NOTICE(p_hwfn,
					  "LL2 OOO RX packet is not valid\n");
				return -EIO;
			}
			list_del(&p_pkt->list_entry);
			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			p_buffer->packet_length = packet_length;
			p_buffer->parse_flags = parse_flags;
			p_buffer->vlan = vlan;
			p_buffer->placement_offset = placement_offset;
			qed_chain_consume(&p_rx->rxq_chain);
			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

			switch (iscsi_ooo->ooo_opcode) {
			case TCP_EVENT_ADD_NEW_ISLE:
				qed_ooo_add_new_isle(p_hwfn,
						     p_hwfn->p_ooo_info,
						     cid,
						     iscsi_ooo->ooo_isle,
						     p_buffer);
				break;
			case TCP_EVENT_ADD_ISLE_RIGHT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_RIGHT_BUF);
				break;
			case TCP_EVENT_ADD_ISLE_LEFT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				break;
			case TCP_EVENT_JOIN:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle + 1,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				qed_ooo_join_isles(p_hwfn,
						   p_hwfn->p_ooo_info,
						   cid, iscsi_ooo->ooo_isle);
				break;
			case TCP_EVENT_ADD_PEN:
				num_ooo_add_to_peninsula++;
				qed_ooo_put_ready_buffer(p_hwfn,
							 p_hwfn->p_ooo_info,
							 p_buffer, true);
				break;
			}
		} else {
			DP_NOTICE(p_hwfn,
				  "Unexpected event (%d) TX OOO completion\n",
				  iscsi_ooo->ooo_opcode);
		}
	}

	return 0;
}

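/* Summary of the OOO opcode handling above: drop_isle/drop_size deletions
 * are applied first; then TCP_EVENT_ADD_NEW_ISLE creates an isle,
 * ADD_ISLE_RIGHT/ADD_ISLE_LEFT append a buffer to an existing isle, JOIN
 * adds the buffer to the right-hand isle and merges it with the one to
 * its left, and ADD_PEN queues the buffer as ready for the in-order
 * (peninsula) Tx replay performed below.
 */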
static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_ooo_buffer *p_buffer;
	u16 l4_hdr_offset_w;
	dma_addr_t first_frag;
	u16 parse_flags;
	u8 bd_flags;
	int rc;

	/* Submit Tx buffers here */
	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
						    p_hwfn->p_ooo_info))) {
		l4_hdr_offset_w = 0;
		bd_flags = 0;

		first_frag = p_buffer->rx_buffer_phys_addr +
			     p_buffer->placement_offset;
		parse_flags = p_buffer->parse_flags;
		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);

		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.vlan = p_buffer->vlan;
		tx_pkt.bd_flags = bd_flags;
		tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
		tx_pkt.tx_dest = p_ll2_conn->tx_dest;
		tx_pkt.first_frag = first_frag;
		tx_pkt.first_frag_len = p_buffer->packet_length;
		tx_pkt.cookie = p_buffer;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
					       &tx_pkt, true);
		if (rc) {
			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
						 p_buffer, false);
			break;
		}
	}
}

static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;

	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr,
					    0, p_buffer, true);
		if (rc) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			break;
		}
	}
}

static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	int rc;

	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ooo_buffer *p_buffer;
	bool b_dont_submit_rx = false;
	u16 new_idx = 0, num_bds = 0;
	int rc;

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

	if (!num_bds)
		return 0;

	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			return -EINVAL;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			return -EINVAL;

		if (p_pkt->bd_used != 1) {
			DP_NOTICE(p_hwfn,
				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
				  p_pkt->bd_used);
			return -EINVAL;
		}

		list_del(&p_pkt->list_entry);

		num_bds--;
		p_tx->bds_idx++;
		qed_chain_consume(&p_tx->txq_chain);

		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		if (b_dont_submit_rx) {
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
			continue;
		}

		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr, 0,
					    p_buffer, true);
		if (rc != 0) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			b_dont_submit_rx = true;
		}
	}

	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

	DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
		   *handle);

	qed_ll2_terminate_connection(hwfn, *handle);
	qed_ll2_release_connection(hwfn, *handle);
	*handle = QED_LL2_UNUSED_HANDLE;
}

static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
				     u8 action_on_error)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 cqe_pbl_size;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue =
	    (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0 : 1;

	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
	} else {
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;
	}

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct core_tx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 pq_id = 0, pbl_size;
	int rc = -EINVAL;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO)
		p_ll2_conn->tx_stats_en = 0;
	else
		p_ll2_conn->tx_stats_en = 1;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_tx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_tx->tx_sb_index;
	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

	switch (p_ll2_conn->input.tx_tc) {
	case LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
		break;
	case OOO_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
		break;
	default:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
		break;
	}

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	switch (conn_type) {
	case QED_LL2_TYPE_FCOE:
		p_ramrod->conn_type = PROTOCOLID_FCOE;
		break;
	case QED_LL2_TYPE_ISCSI:
	case QED_LL2_TYPE_ISCSI_OOO:
		p_ramrod->conn_type = PROTOCOLID_ISCSI;
		break;
	case QED_LL2_TYPE_ROCE:
		p_ramrod->conn_type = PROTOCOLID_ROCE;
		break;
	default:
		p_ramrod->conn_type = PROTOCOLID_ETH;
		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
	}

	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

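/* Tx queues are bound to a physical queue (PQ) according to their traffic
 * class above: LB_TC and OOO_LB_TC map to the loopback and OOO PQs, while
 * every other TC falls back to the offload PQ. A hypothetical user that
 * leaves input.tx_tc at a default TC would therefore land on
 * PQ_FLAGS_OFLD.
 */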
static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

	p_ramrod->complete_event_flg = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_info)
{
	struct qed_ll2_rx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.rx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_NEXT_PTR,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.rx_num_desc,
			     sizeof(struct core_rx_bd),
			     &p_ll2_info->rx_queue.rxq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
		goto out;
	}

	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
		goto out;
	}
	p_ll2_info->rx_queue.descq_array = p_descq;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.rx_num_desc,
			     sizeof(struct core_rx_fast_path_cqe),
			     &p_ll2_info->rx_queue.rcq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
		goto out;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);

out:
	return rc;
}

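/* Note the asymmetry in the two Rx chains allocated above: the BD ring
 * (rxq_chain) is a next-pointer chain the driver produces buffers into,
 * while the completion ring (rcq_chain) is PBL-based so its page table
 * can be handed to firmware in the Rx-start ramrod (cqe_pbl_addr).
 */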
static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
					 struct qed_ll2_info *p_ll2_info)
{
	struct qed_ll2_tx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.tx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.tx_num_desc,
			     sizeof(struct core_tx_bd),
			     &p_ll2_info->tx_queue.txq_chain);
	if (rc)
		goto out;

	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		goto out;
	}
	p_ll2_info->tx_queue.descq_array = p_descq;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);

out:
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
			  p_ll2_info->input.tx_num_desc);
	return rc;
}

static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info, u16 mtu)
{
	struct qed_ooo_buffer *p_buf = NULL;
	void *p_virt;
	u16 buf_idx;
	int rc = 0;

	if (p_ll2_info->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return rc;

	/* Correct number of requested OOO buffers if needed */
	if (!p_ll2_info->input.rx_num_ooo_buffers) {
		u16 num_desc = p_ll2_info->input.rx_num_desc;

		if (!num_desc)
			return -EINVAL;
		p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
	}

	for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
	     buf_idx++) {
		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
		if (!p_buf) {
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
					 ETH_CACHE_LINE_SIZE - 1) &
					~(ETH_CACHE_LINE_SIZE - 1);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    p_buf->rx_buffer_size,
					    &p_buf->rx_buffer_phys_addr,
					    GFP_KERNEL);
		if (!p_virt) {
			kfree(p_buf);
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_virt_addr = p_virt;
		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
		   p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);

out:
	return rc;
}

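/* Worked example of the OOO buffer sizing above, assuming mtu == 1500 and
 * ETH_CACHE_LINE_SIZE == 64: 1500 + 26 + 64 = 1590, which the alignment
 * mask rounds up to 1600 bytes per DMA-coherent buffer. With the default
 * rx_num_ooo_buffers of 2 * rx_num_desc, 100 Rx descriptors would cost
 * 200 such buffers.
 */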
static int
qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
{
	if (!cbs || (!cbs->rx_comp_cb ||
		     !cbs->rx_release_cb ||
		     !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))
		return -EINVAL;

	p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
	p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
	p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
	p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
	p_ll2_info->cbs.cookie = cbs->cookie;

	return 0;
}

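/* All four callbacks plus the cookie are mandatory. A caller would
 * typically set them up as below (a sketch with hypothetical handler
 * names; my_* are assumptions of the example, not driver symbols):
 *
 *	struct qed_ll2_cbs cbs = {
 *		.rx_comp_cb	= my_rx_comp,
 *		.rx_release_cb	= my_rx_release,
 *		.tx_comp_cb	= my_tx_comp,
 *		.tx_release_cb	= my_tx_release,
 *		.cookie		= my_dev,
 *	};
 *	data.cbs = &cbs;
 */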
static enum core_error_handle
qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
{
	switch (err) {
	case QED_LL2_DROP_PACKET:
		return LL2_DROP_PACKET;
	case QED_LL2_DO_NOTHING:
		return LL2_DO_NOTHING;
	case QED_LL2_ASSERT:
		return LL2_ASSERT;
	default:
		return LL2_DO_NOTHING;
	}
}

int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
{
	struct qed_hwfn *p_hwfn = cxt;
	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
	struct qed_ll2_info *p_ll2_info = NULL;
	u8 i, *p_tx_max;
	int rc;

	if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
		return -EINVAL;

	/* Find a free connection to be used */
	for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
		if (p_hwfn->p_ll2_info[i].b_active) {
			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
			continue;
		}

		p_hwfn->p_ll2_info[i].b_active = true;
		p_ll2_info = &p_hwfn->p_ll2_info[i];
		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
		break;
	}
	if (!p_ll2_info)
		return -EBUSY;

	memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));

	p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
			      CORE_TX_DEST_NW : CORE_TX_DEST_LB;

	/* Correct maximum number of Tx BDs */
	p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
	if (*p_tx_max == 0)
		*p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
	else
		*p_tx_max = min_t(u8, *p_tx_max,
				  CORE_LL2_TX_MAX_BDS_PER_PACKET);

	rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
	if (rc) {
		DP_NOTICE(p_hwfn, "Invalid callback functions\n");
		goto q_allocate_fail;
	}

	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
					    data->input.mtu);
	if (rc)
		goto q_allocate_fail;

	/* Register callbacks for the Rx/Tx queues */
	if (data->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
		comp_rx_cb = qed_ll2_lb_rxq_completion;
		comp_tx_cb = qed_ll2_lb_txq_completion;
	} else {
		comp_rx_cb = qed_ll2_rxq_completion;
		comp_tx_cb = qed_ll2_txq_completion;
	}

	if (data->input.rx_num_desc) {
		qed_int_register_cb(p_hwfn, comp_rx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->rx_queue.rx_sb_index,
				    &p_ll2_info->rx_queue.p_fw_cons);
		p_ll2_info->rx_queue.b_cb_registred = true;
	}

	if (data->input.tx_num_desc) {
		qed_int_register_cb(p_hwfn,
				    comp_tx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->tx_queue.tx_sb_index,
				    &p_ll2_info->tx_queue.p_fw_cons);
		p_ll2_info->tx_queue.b_cb_registred = true;
	}

	*data->p_connection_handle = i;
	return rc;

q_allocate_fail:
	qed_ll2_release_connection(p_hwfn, i);
	return -ENOMEM;
}

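/* Typical connection bring-up as seen from a protocol driver (a sketch;
 * error handling and the qed_ll2_acquire_data fields not shown here are
 * assumptions of the example):
 *
 *	u8 handle;
 *
 *	data.p_connection_handle = &handle;
 *	rc = qed_ll2_acquire_connection(p_hwfn, &data);
 *	if (!rc)
 *		rc = qed_ll2_establish_connection(p_hwfn, handle);
 *	if (!rc)
 *		rc = qed_ll2_post_rx_buffer(p_hwfn, handle, phys_addr,
 *					    buf_len, cookie, 1);
 */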
static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_error_handle error_input;
	enum core_error_handle error_mode;
	u8 action_on_error = 0;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
	error_input = p_ll2_conn->input.ai_err_packet_too_big;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
	error_input = p_ll2_conn->input.ai_err_no_buf;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);

	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}

static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn)
{
	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}

int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;
	u32 i, capacity;
	u8 qid;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	p_rx = &p_ll2_conn->rx_queue;
	p_tx = &p_ll2_conn->tx_queue;

	qed_chain_reset(&p_rx->rxq_chain);
	qed_chain_reset(&p_rx->rcq_chain);
	INIT_LIST_HEAD(&p_rx->active_descq);
	INIT_LIST_HEAD(&p_rx->free_descq);
	INIT_LIST_HEAD(&p_rx->posting_descq);
	spin_lock_init(&p_rx->lock);
	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_rx->descq_array[i].list_entry,
			      &p_rx->free_descq);
	*p_rx->p_fw_cons = 0;

	qed_chain_reset(&p_tx->txq_chain);
	INIT_LIST_HEAD(&p_tx->active_descq);
	INIT_LIST_HEAD(&p_tx->free_descq);
	INIT_LIST_HEAD(&p_tx->sending_descq);
	spin_lock_init(&p_tx->lock);
	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_tx->descq_array[i].list_entry,
			      &p_tx->free_descq);
	p_tx->cur_completing_bd_idx = 0;
	p_tx->bds_idx = 0;
	p_tx->b_completing_packet = false;
	p_tx->cur_send_packet = NULL;
	p_tx->cur_send_frag_num = 0;
	p_tx->cur_completing_frag_num = 0;
	*p_tx->p_fw_cons = 0;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
	if (rc)
		goto out;

	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
	p_ll2_conn->queue_id = qid;
	p_ll2_conn->tx_stats_id = qid;
	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
			      GTT_BAR0_MAP_REG_TSDM_RAM +
			      TSTORM_LL2_RX_PRODS_OFFSET(qid);
	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
			      qed_db_addr(p_ll2_conn->cid, DQ_DEMS_LEGACY);

	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);

	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    0x8906, 0,
					    QED_LLH_FILTER_ETHERTYPE);
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    0x8914, 0,
					    QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_rx_queue *p_rx,
					     struct qed_ll2_rx_packet *p_curp)
{
	struct qed_ll2_rx_packet *p_posting_packet = NULL;
	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
	bool b_notify_fw = false;
	u16 bd_prod, cq_prod;

	/* This handles the flushing of already posted buffers */
	while (!list_empty(&p_rx->posting_descq)) {
		p_posting_packet = list_first_entry(&p_rx->posting_descq,
						    struct qed_ll2_rx_packet,
						    list_entry);
		list_move_tail(&p_posting_packet->list_entry,
			       &p_rx->active_descq);
		b_notify_fw = true;
	}

	/* This handles the supplied packet [if there is one] */
	if (p_curp) {
		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
		b_notify_fw = true;
	}

	if (!b_notify_fw)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
	rx_prod.bd_prod = cpu_to_le16(bd_prod);
	rx_prod.cqe_prod = cpu_to_le16(cq_prod);
	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}

int qed_ll2_post_rx_buffer(void *cxt,
			   u8 connection_handle,
			   dma_addr_t addr,
			   u16 buf_len, void *cookie, u8 notify_fw)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct core_rx_bd_with_buff_len *p_curb = NULL;
	struct qed_ll2_rx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags;
	void *p_data;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	if (!list_empty(&p_rx->free_descq))
		p_curp = list_first_entry(&p_rx->free_descq,
					  struct qed_ll2_rx_packet, list_entry);
	if (p_curp) {
		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
			p_data = qed_chain_produce(&p_rx->rxq_chain);
			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
			qed_chain_produce(&p_rx->rcq_chain);
		}
	}

	/* If we're lacking entries, let's try to flush buffers to FW */
	if (!p_curp || !p_curb) {
		rc = -EBUSY;
		p_curp = NULL;
		goto out_notify;
	}

	/* We have an Rx packet we can fill */
	DMA_REGPAIR_LE(p_curb->addr, addr);
	p_curb->buff_length = cpu_to_le16(buf_len);
	p_curp->rx_buf_addr = addr;
	p_curp->cookie = cookie;
	p_curp->rxq_bd = p_curb;
	p_curp->buf_length = buf_len;
	list_del(&p_curp->list_entry);

	/* Check if we only want to enqueue this packet without informing FW */
	if (!notify_fw) {
		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
		goto out;
	}

out_notify:
	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
out:
	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
					  struct qed_ll2_tx_queue *p_tx,
					  struct qed_ll2_tx_packet *p_curp,
					  struct qed_ll2_tx_pkt_info *pkt,
					  u8 notify_fw)
{
	list_del(&p_curp->list_entry);
	p_curp->cookie = pkt->cookie;
	p_curp->bd_used = pkt->num_of_bds;
	p_curp->notify_fw = notify_fw;
	p_tx->cur_send_packet = p_curp;
	p_tx->cur_send_frag_num = 0;

	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
	p_tx->cur_send_frag_num++;
}

static void
qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2,
				 struct qed_ll2_tx_packet *p_curp,
				 struct qed_ll2_tx_pkt_info *pkt)
{
	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
	struct core_tx_bd *start_bd = NULL;
	enum core_roce_flavor_type roce_flavor;
	enum core_tx_dest tx_dest;
	u16 bd_data = 0, frag_idx;

	roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
							     : CORE_RROCE;

	tx_dest = (pkt->tx_dest == QED_LL2_TX_DEST_NW) ? CORE_TX_DEST_NW
						       : CORE_TX_DEST_LB;

	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
	start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
		  cpu_to_le16(pkt->l4_hdr_offset_w));
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
	bd_data |= pkt->bd_flags;
	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
	start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
	DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
	start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
		   p_ll2->queue_id,
		   p_ll2->cid,
		   p_ll2->input.conn_type,
		   prod_idx,
		   pkt->first_frag_len,
		   pkt->num_of_bds,
		   le32_to_cpu(start_bd->addr.hi),
		   le32_to_cpu(start_bd->addr.lo));

	if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
		return;

	/* Need to provide the packet with additional BDs for frags */
	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
	     frag_idx < pkt->num_of_bds; frag_idx++) {
		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;

		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
		(*p_bd)->bd_data.as_bitfield = 0;
		(*p_bd)->bitfield1 = 0;
		p_curp->bds_set[frag_idx].tx_frag = 0;
		p_curp->bds_set[frag_idx].frag_len = 0;
	}
}

/* This should be called while the Txq spinlock is being held */
static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct core_db_data db_msg = { 0, 0, 0 };
	u16 bd_prod;

	/* If there are missing BDs, don't do anything now */
	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)
		return;

	/* Push the current packet to the list and clean after it */
	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
		      &p_ll2_conn->tx_queue.sending_descq);
	p_ll2_conn->tx_queue.cur_send_packet = NULL;
	p_ll2_conn->tx_queue.cur_send_frag_num = 0;

	/* Notify FW of packet only if requested to */
	if (!b_notify)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);

	while (!list_empty(&p_tx->sending_descq)) {
		p_pkt = list_first_entry(&p_tx->sending_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
	}

	SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_TX_BD_PROD_CMD);
	db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db_msg.spq_prod = cpu_to_le16(bd_prod);

	/* Make sure the BDs data is updated before ringing the doorbell */
	wmb();

	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
		   p_ll2_conn->queue_id,
		   p_ll2_conn->cid,
		   p_ll2_conn->input.conn_type, db_msg.spq_prod);
}

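/* The doorbell record built above tells the XCM aggregation block to SET
 * its Tx BD producer to the new chain producer index; the wmb() before
 * DIRECT_REG_WR() guarantees the BDs themselves are visible to the device
 * before the producer update is.
 */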
int qed_ll2_prepare_tx_packet(void *cxt,
			      u8 connection_handle,
			      struct qed_ll2_tx_pkt_info *pkt,
			      bool notify_fw)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_tx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_chain *p_tx_chain;
	unsigned long flags;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_tx = &p_ll2_conn->tx_queue;
	p_tx_chain = &p_tx->txq_chain;

	if (pkt->num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
		return -EIO;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->cur_send_packet) {
		rc = -EEXIST;
		goto out;
	}

	/* Get entry, but only if we have tx elements for it */
	if (!list_empty(&p_tx->free_descq))
		p_curp = list_first_entry(&p_tx->free_descq,
					  struct qed_ll2_tx_packet, list_entry);
	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
		p_curp = NULL;

	if (!p_curp) {
		rc = -EBUSY;
		goto out;
	}

	/* Prepare packet and BD, and perhaps send a doorbell to FW */
	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);

	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);

	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);

out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

int qed_ll2_set_fragment_of_tx_packet(void *cxt,
				      u8 connection_handle,
				      dma_addr_t addr, u16 nbytes)
{
	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	u16 cur_send_frag_num = 0;
	struct core_tx_bd *p_bd;
	unsigned long flags;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;

	if (!p_ll2_conn->tx_queue.cur_send_packet)
		return -EINVAL;

	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;

	if (cur_send_frag_num >= p_cur_send_packet->bd_used)
		return -EINVAL;

	/* Fill the BD information, and possibly notify FW */
	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
	DMA_REGPAIR_LE(p_bd->addr, addr);
	p_bd->nbytes = cpu_to_le16(nbytes);
	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;

	p_ll2_conn->tx_queue.cur_send_frag_num++;

	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);

	return 0;
}

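/* A multi-fragment send pairs the two calls above; e.g. for a two-BD
 * packet (a sketch; frag0/frag1 names are assumptions of the example).
 * The doorbell is rung only once cur_send_frag_num reaches num_of_bds,
 * i.e. from the second call:
 *
 *	pkt.num_of_bds = 2;
 *	pkt.first_frag = frag0_phys;
 *	pkt.first_frag_len = frag0_len;
 *	rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, &pkt, true);
 *	if (!rc)
 *		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
 *						       frag1_phys, frag1_len);
 */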
int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	int rc = -EINVAL;
	struct qed_ptt *p_ptt;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	/* Stop Tx & Rx of connection, if needed */
	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;
		qed_ll2_txq_flush(p_hwfn, connection_handle);
	}

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;
		qed_ll2_rxq_flush(p_hwfn, connection_handle);
	}

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO)
		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
					       0x8906, 0,
					       QED_LLH_FILTER_ETHERTYPE);
		qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
					       0x8914, 0,
					       QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

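/* Orderly teardown is terminate-then-release, exactly as qed_ll2_stop_ooo()
 * does above: qed_ll2_terminate_connection() stops the firmware queues and
 * flushes in-flight descriptors through the *_flush() helpers, after which
 * qed_ll2_release_connection() may free the chains and mark the handle
 * inactive again.
 */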
1814 static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
1815 struct qed_ll2_info *p_ll2_conn)
1816 {
1817 struct qed_ooo_buffer *p_buffer;
1818
1819 if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
1820 return;
1821
1822 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
1823 while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
1824 p_hwfn->p_ooo_info))) {
1825 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1826 p_buffer->rx_buffer_size,
1827 p_buffer->rx_buffer_virt_addr,
1828 p_buffer->rx_buffer_phys_addr);
1829 kfree(p_buffer);
1830 }
1831 }
1832
1833 void qed_ll2_release_connection(void *cxt, u8 connection_handle)
1834 {
1835 struct qed_hwfn *p_hwfn = cxt;
1836 struct qed_ll2_info *p_ll2_conn = NULL;
1837
1838 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1839 if (!p_ll2_conn)
1840 return;
1841
1842 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1843 p_ll2_conn->rx_queue.b_cb_registred = false;
1844 qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
1845 }
1846
1847 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1848 p_ll2_conn->tx_queue.b_cb_registred = false;
1849 qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
1850 }
1851
1852 kfree(p_ll2_conn->tx_queue.descq_array);
1853 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
1854
1855 kfree(p_ll2_conn->rx_queue.descq_array);
1856 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
1857 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
1858
1859 qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
1860
1861 qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
1862
1863 mutex_lock(&p_ll2_conn->mutex);
1864 p_ll2_conn->b_active = false;
1865 mutex_unlock(&p_ll2_conn->mutex);
1866 }
1867
1868 int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
1869 {
1870 struct qed_ll2_info *p_ll2_connections;
1871 u8 i;
1872
1873 /* Allocate LL2's set struct */
1874 p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
1875 sizeof(struct qed_ll2_info), GFP_KERNEL);
1876 if (!p_ll2_connections) {
1877 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2_info'\n");
1878 return -ENOMEM;
1879 }
1880
1881 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1882 p_ll2_connections[i].my_id = i;
1883
1884 p_hwfn->p_ll2_info = p_ll2_connections;
1885 return 0;
1886 }
1887
1888 void qed_ll2_setup(struct qed_hwfn *p_hwfn)
1889 {
1890 int i;
1891
1892 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1893 mutex_init(&p_hwfn->p_ll2_info[i].mutex);
1894 }
1895
1896 void qed_ll2_free(struct qed_hwfn *p_hwfn)
1897 {
1898 if (!p_hwfn->p_ll2_info)
1899 return;
1900
1901 kfree(p_hwfn->p_ll2_info);
1902 p_hwfn->p_ll2_info = NULL;
1903 }
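/* Editor sketch (illustration only): the per-hwfn lifecycle formed by
 * the three helpers above; the surrounding call sites are assumed to
 * live in the common device init/teardown path.
 */
static int example_ll2_lifecycle(struct qed_hwfn *p_hwfn)
{
	int rc;

	rc = qed_ll2_alloc(p_hwfn);	/* allocate the connection array */
	if (rc)
		return rc;

	qed_ll2_setup(p_hwfn);		/* init the per-connection mutexes */

	/* ... acquire/establish/terminate/release connections here ... */

	qed_ll2_free(p_hwfn);		/* drop the array on teardown */
	return 0;
}
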
1904
1905 static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
1906 struct qed_ptt *p_ptt,
1907 struct qed_ll2_info *p_ll2_conn,
1908 struct qed_ll2_stats *p_stats)
1909 {
1910 struct core_ll2_tstorm_per_queue_stat tstats;
1911 u8 qid = p_ll2_conn->queue_id;
1912 u32 tstats_addr;
1913
1914 memset(&tstats, 0, sizeof(tstats));
1915 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1916 CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
1917 qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
1918
1919 p_stats->packet_too_big_discard =
1920 HILO_64_REGPAIR(tstats.packet_too_big_discard);
1921 p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
1922 }
1923
1924 static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
1925 struct qed_ptt *p_ptt,
1926 struct qed_ll2_info *p_ll2_conn,
1927 struct qed_ll2_stats *p_stats)
1928 {
1929 struct core_ll2_ustorm_per_queue_stat ustats;
1930 u8 qid = p_ll2_conn->queue_id;
1931 u32 ustats_addr;
1932
1933 memset(&ustats, 0, sizeof(ustats));
1934 ustats_addr = BAR0_MAP_REG_USDM_RAM +
1935 CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
1936 qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
1937
1938 p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1939 p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1940 p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1941 p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1942 p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1943 p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1944 }
1945
1946 static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
1947 struct qed_ptt *p_ptt,
1948 struct qed_ll2_info *p_ll2_conn,
1949 struct qed_ll2_stats *p_stats)
1950 {
1951 struct core_ll2_pstorm_per_queue_stat pstats;
1952 u8 stats_id = p_ll2_conn->tx_stats_id;
1953 u32 pstats_addr;
1954
1955 memset(&pstats, 0, sizeof(pstats));
1956 pstats_addr = BAR0_MAP_REG_PSDM_RAM +
1957 CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
1958 qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
1959
1960 p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1961 p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1962 p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1963 p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1964 p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1965 p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1966 }
1967
1968 int qed_ll2_get_stats(void *cxt,
1969 u8 connection_handle, struct qed_ll2_stats *p_stats)
1970 {
1971 struct qed_hwfn *p_hwfn = cxt;
1972 struct qed_ll2_info *p_ll2_conn = NULL;
1973 struct qed_ptt *p_ptt;
1974
1975 memset(p_stats, 0, sizeof(*p_stats));
1976
1977 if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
1978 !p_hwfn->p_ll2_info)
1979 return -EINVAL;
1980
1981 p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
1982
1983 p_ptt = qed_ptt_acquire(p_hwfn);
1984 if (!p_ptt) {
1985 DP_ERR(p_hwfn, "Failed to acquire PTT\n");
1986 return -EINVAL;
1987 }
1988
1989 _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
1990 _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
1991 if (p_ll2_conn->tx_stats_en)
1992 _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
1993
1994 qed_ptt_release(p_hwfn, p_ptt);
1995 return 0;
1996 }
1997
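/* Editor sketch (illustration only): reading the aggregated counters
 * through qed_ll2_get_stats() above. Only two of the fields filled in
 * by the storm helpers are printed here.
 */
static void example_ll2_dump_stats(struct qed_hwfn *p_hwfn, u8 handle)
{
	struct qed_ll2_stats stats;

	if (qed_ll2_get_stats(p_hwfn, handle, &stats))
		return;

	DP_INFO(p_hwfn, "LL2 rcv_ucast_pkts %llu sent_ucast_pkts %llu\n",
		(unsigned long long)stats.rcv_ucast_pkts,
		(unsigned long long)stats.sent_ucast_pkts);
}
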
1998 static void qed_ll2b_release_rx_packet(void *cxt,
1999 u8 connection_handle,
2000 void *cookie,
2001 dma_addr_t rx_buf_addr,
2002 bool b_last_packet)
2003 {
2004 struct qed_hwfn *p_hwfn = cxt;
2005
2006 qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
2007 }
2008
2009 static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
2010 const struct qed_ll2_cb_ops *ops,
2011 void *cookie)
2012 {
2013 cdev->ll2->cbs = ops;
2014 cdev->ll2->cb_cookie = cookie;
2015 }
2016
2017 static struct qed_ll2_cbs ll2_cbs = {
2018 .rx_comp_cb = &qed_ll2b_complete_rx_packet,
2019 .rx_release_cb = &qed_ll2b_release_rx_packet,
2020 .tx_comp_cb = &qed_ll2b_complete_tx_packet,
2021 .tx_release_cb = &qed_ll2b_complete_tx_packet,
2022 };
2023
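/* Editor sketch (illustration only): ll2_cbs wires the FW completion
 * path to the qed_ll2b_* helpers above, while a protocol client hooks in
 * via qed_ll2_register_cb_ops(); the cookie it passes is handed back on
 * every callback. The rx_cb/tx_cb field names are assumed from
 * qed_ll2_if.h; a real client supplies its own handlers instead of NULL:
 *
 *	qed_ll2_register_cb_ops(cdev, &example_client_ops, client_cookie);
 */
static const struct qed_ll2_cb_ops example_client_ops = {
	.rx_cb = NULL,	/* client's "skb received" handler */
	.tx_cb = NULL,	/* client's "skb transmitted" handler */
};
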
2024 static void qed_ll2_set_conn_data(struct qed_dev *cdev,
2025 struct qed_ll2_acquire_data *data,
2026 struct qed_ll2_params *params,
2027 enum qed_ll2_conn_type conn_type,
2028 u8 *handle, bool lb)
2029 {
2030 memset(data, 0, sizeof(*data));
2031
2032 data->input.conn_type = conn_type;
2033 data->input.mtu = params->mtu;
2034 data->input.rx_num_desc = QED_LL2_RX_SIZE;
2035 data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
2036 data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
2037 data->input.tx_num_desc = QED_LL2_TX_SIZE;
2038 data->p_connection_handle = handle;
2039 data->cbs = &ll2_cbs;
2040 ll2_cbs.cookie = QED_LEADING_HWFN(cdev);
2041
2042 if (lb) {
2043 data->input.tx_tc = OOO_LB_TC;
2044 data->input.tx_dest = QED_LL2_TX_DEST_LB;
2045 } else {
2046 data->input.tx_tc = 0;
2047 data->input.tx_dest = QED_LL2_TX_DEST_NW;
2048 }
2049 }
2050
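/* Editor sketch (illustration only): qed_ll2_set_conn_data() only fills
 * the acquire request. The non-loopback path it enables looks like the
 * following; the function name is hypothetical, and qed_ll2_start_ooo()
 * below shows the real loopback (lb = true) variant.
 */
static int example_ll2_acquire_nw(struct qed_dev *cdev,
				  struct qed_ll2_params *params, u8 *handle)
{
	struct qed_ll2_acquire_data data;

	/* lb = false: tx_tc 0, destination is the network */
	qed_ll2_set_conn_data(cdev, &data, params,
			      QED_LL2_TYPE_TEST, handle, false);

	return qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data);
}
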
2051 static int qed_ll2_start_ooo(struct qed_dev *cdev,
2052 struct qed_ll2_params *params)
2053 {
2054 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2055 u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
2056 struct qed_ll2_acquire_data data;
2057 int rc;
2058
2059 qed_ll2_set_conn_data(cdev, &data, params,
2060 QED_LL2_TYPE_ISCSI_OOO, handle, true);
2061
2062 rc = qed_ll2_acquire_connection(hwfn, &data);
2063 if (rc) {
2064 DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
2065 goto out;
2066 }
2067
2068 rc = qed_ll2_establish_connection(hwfn, *handle);
2069 if (rc) {
2070 DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
2071 goto fail;
2072 }
2073
2074 return 0;
2075
2076 fail:
2077 qed_ll2_release_connection(hwfn, *handle);
2078 out:
2079 *handle = QED_LL2_UNUSED_HANDLE;
2080 return rc;
2081 }
2082
2083 static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
2084 {
2085 struct qed_ll2_buffer *buffer, *tmp_buffer;
2086 enum qed_ll2_conn_type conn_type;
2087 struct qed_ll2_acquire_data data;
2088 struct qed_ptt *p_ptt;
2089 int rc, i;
2090
2091
2092 /* Initialize LL2 locks & lists */
2093 INIT_LIST_HEAD(&cdev->ll2->list);
2094 spin_lock_init(&cdev->ll2->lock);
2095 cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
2096 L1_CACHE_BYTES + params->mtu;
2097
2098 /* Allocate memory for LL2 */
2099 DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
2100 cdev->ll2->rx_size);
2101 for (i = 0; i < QED_LL2_RX_SIZE; i++) {
2102 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2103 if (!buffer) {
2104 DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
2105 goto fail;
2106 }
2107
2108 rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
2109 &buffer->phys_addr);
2110 if (rc) {
2111 kfree(buffer);
2112 goto fail;
2113 }
2114
2115 list_add_tail(&buffer->list, &cdev->ll2->list);
2116 }
2117
2118 switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
2119 case QED_PCI_FCOE:
2120 conn_type = QED_LL2_TYPE_FCOE;
2121 break;
2122 case QED_PCI_ISCSI:
2123 conn_type = QED_LL2_TYPE_ISCSI;
2124 break;
2125 case QED_PCI_ETH_ROCE:
2126 conn_type = QED_LL2_TYPE_ROCE;
2127 break;
2128 default:
2129 conn_type = QED_LL2_TYPE_TEST;
2130 }
2131
2132 qed_ll2_set_conn_data(cdev, &data, params, conn_type,
2133 &cdev->ll2->handle, false);
2134
2135 rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data);
2136 if (rc) {
2137 DP_INFO(cdev, "Failed to acquire LL2 connection\n");
2138 goto fail;
2139 }
2140
2141 rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
2142 cdev->ll2->handle);
2143 if (rc) {
2144 DP_INFO(cdev, "Failed to establish LL2 connection\n");
2145 goto release_fail;
2146 }
2147
2148 /* Post all Rx buffers to FW */
2149 spin_lock_bh(&cdev->ll2->lock);
2150 list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
2151 rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
2152 cdev->ll2->handle,
2153 buffer->phys_addr, 0, buffer, 1);
2154 if (rc) {
2155 DP_INFO(cdev,
2156 "Failed to post an Rx buffer; Deleting it\n");
2157 dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
2158 cdev->ll2->rx_size, DMA_FROM_DEVICE);
2159 kfree(buffer->data);
2160 list_del(&buffer->list);
2161 kfree(buffer);
2162 } else {
2163 cdev->ll2->rx_cnt++;
2164 }
2165 }
2166 spin_unlock_bh(&cdev->ll2->lock);
2167
2168 if (!cdev->ll2->rx_cnt) {
2169 DP_INFO(cdev, "Failed to post even a single Rx buffer\n");
2170 goto release_terminate;
2171 }
2172
2173 if (!is_valid_ether_addr(params->ll2_mac_address)) {
2174 DP_INFO(cdev, "Invalid Ethernet address\n");
2175 goto release_terminate;
2176 }
2177
2178 if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
2179 cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
2180 DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
2181 rc = qed_ll2_start_ooo(cdev, params);
2182 if (rc) {
2183 DP_INFO(cdev,
2184 "Failed to initialize the OOO LL2 queue\n");
2185 goto release_terminate;
2186 }
2187 }
2188
2189 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2190 if (!p_ptt) {
2191 DP_INFO(cdev, "Failed to acquire PTT\n");
2192 goto release_terminate;
2193 }
2194
2195 rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2196 params->ll2_mac_address);
2197 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2198 if (rc) {
2199 DP_ERR(cdev, "Failed to add an LLH MAC filter\n");
2200 goto release_terminate_all;
2201 }
2202
2203 ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
2204 return 0;
2205
2206 release_terminate_all:
2207
2208 release_terminate:
2209 qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2210 release_fail:
2211 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2212 fail:
2213 qed_ll2_kill_buffers(cdev);
2214 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2215 return -EINVAL;
2216 }
2217
2218 static int qed_ll2_stop(struct qed_dev *cdev)
2219 {
2220 struct qed_ptt *p_ptt;
2221 int rc;
2222
2223 if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
2224 return 0;
2225
2226 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2227 if (!p_ptt) {
2228 DP_INFO(cdev, "Failed to acquire PTT\n");
2229 goto fail;
2230 }
2231
2232 qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2233 cdev->ll2_mac_address);
2234 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2235 eth_zero_addr(cdev->ll2_mac_address);
2236
2237 if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
2238 cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
2239 qed_ll2_stop_ooo(cdev);
2240
2241 rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
2242 cdev->ll2->handle);
2243 if (rc)
2244 DP_INFO(cdev, "Failed to terminate LL2 connection\n");
2245
2246 qed_ll2_kill_buffers(cdev);
2247
2248 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2249 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2250
2251 return rc;
2252 fail:
2253 return -EINVAL;
2254 }
2255
2256 static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
2257 {
2258 struct qed_ll2_tx_pkt_info pkt;
2259 const skb_frag_t *frag;
2260 int rc = -EINVAL, i;
2261 dma_addr_t mapping;
2262 u16 vlan = 0;
2263 u8 flags = 0;
2264
2265 if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
2266 DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
2267 return -EINVAL;
2268 }
2269
2270 if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
2271 DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
2272 1 + skb_shinfo(skb)->nr_frags);
2273 return -EINVAL;
2274 }
2275
2276 mapping = dma_map_single(&cdev->pdev->dev, skb->data,
2277 skb->len, DMA_TO_DEVICE);
2278 if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
2279 DP_NOTICE(cdev, "SKB mapping failed\n");
2280 return -EINVAL;
2281 }
2282
2283 /* Request HW to calculate IP csum, except for IPv6-in-IPv6 frames */
2284 if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
2285 ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2286 flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
2287
2288 if (skb_vlan_tag_present(skb)) {
2289 vlan = skb_vlan_tag_get(skb);
2290 flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
2291 }
2292
2293 memset(&pkt, 0, sizeof(pkt));
2294 pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
2295 pkt.vlan = vlan;
2296 pkt.bd_flags = flags;
2297 pkt.tx_dest = QED_LL2_TX_DEST_NW;
2298 pkt.first_frag = mapping;
2299 pkt.first_frag_len = skb->len;
2300 pkt.cookie = skb;
2301
2302 rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
2303 &pkt, 1);
2304 if (rc)
2305 goto err;
2306
2307 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2308 frag = &skb_shinfo(skb)->frags[i];
2309
2310 mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
2311 skb_frag_size(frag), DMA_TO_DEVICE);
2312
2313 if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
2314 DP_NOTICE(cdev, "Unable to map frag - dropping packet\n");
2315 rc = -ENOMEM;
2316 goto err2;
2317 }
2318
2319 rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
2320 cdev->ll2->handle,
2321 mapping,
2322 skb_frag_size(frag));
2323
2324 /* If this fails, there is not much to do: a partial packet has
2325 * already been posted and its memory cannot be freed here; it is
2326 * reclaimed when the Tx completion arrives. */
2327 if (rc)
2328 goto err2;
2329 }
2330
2331 return 0;
2332
2333 err:
2334 dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
2335
2336 err2:
2337 return rc;
2338 }
2339
2340 static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
2341 {
2342 if (!cdev->ll2)
2343 return -EINVAL;
2344
2345 return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
2346 cdev->ll2->handle, stats);
2347 }
2348
2349 const struct qed_ll2_ops qed_ll2_ops_pass = {
2350 .start = &qed_ll2_start,
2351 .stop = &qed_ll2_stop,
2352 .start_xmit = &qed_ll2_start_xmit,
2353 .register_cb_ops = &qed_ll2_register_cb_ops,
2354 .get_stats = &qed_ll2_stats,
2355 };
2356
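/* Editor sketch (illustration only): protocol drivers reach LL2 through
 * the ops table above rather than calling the functions directly. A
 * hypothetical client-side sequence, assuming `ll2_ops` points at
 * qed_ll2_ops_pass:
 */
static int example_ll2_client(struct qed_dev *cdev,
			      const struct qed_ll2_ops *ll2_ops,
			      struct qed_ll2_params *params,
			      struct sk_buff *skb)
{
	int rc;

	rc = ll2_ops->start(cdev, params);	/* bring the connection up */
	if (rc)
		return rc;

	return ll2_ops->start_xmit(cdev, skb);	/* queue one frame */
}
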
2357 int qed_ll2_alloc_if(struct qed_dev *cdev)
2358 {
2359 cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
2360 return cdev->ll2 ? 0 : -ENOMEM;
2361 }
2362
2363 void qed_ll2_dealloc_if(struct qed_dev *cdev)
2364 {
2365 kfree(cdev->ll2);
2366 cdev->ll2 = NULL;
2367 }