1/* QLogic qed NIC Driver
2 *
3 * Copyright (c) 2015 QLogic Corporation
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#include <linux/types.h>
11#include <asm/byteorder.h>
12#include <linux/dma-mapping.h>
13#include <linux/if_vlan.h>
14#include <linux/kernel.h>
15#include <linux/pci.h>
16#include <linux/slab.h>
17#include <linux/stddef.h>
18#include <linux/version.h>
19#include <linux/workqueue.h>
20#include <net/ipv6.h>
21#include <linux/bitops.h>
22#include <linux/delay.h>
23#include <linux/errno.h>
24#include <linux/etherdevice.h>
25#include <linux/io.h>
26#include <linux/list.h>
27#include <linux/mutex.h>
28#include <linux/spinlock.h>
29#include <linux/string.h>
30#include <linux/qed/qed_ll2_if.h>
31#include "qed.h"
32#include "qed_cxt.h"
33#include "qed_dev_api.h"
34#include "qed_hsi.h"
35#include "qed_hw.h"
36#include "qed_int.h"
37#include "qed_ll2.h"
38#include "qed_mcp.h"
39#include "qed_reg_addr.h"
40#include "qed_sp.h"
41
42#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
43#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
44
45#define QED_LL2_TX_SIZE (256)
46#define QED_LL2_RX_SIZE (4096)
47
48struct qed_cb_ll2_info {
49 int rx_cnt;
50 u32 rx_size;
51 u8 handle;
52 bool frags_mapped;
53
 54 /* Lock protecting LL2 buffer lists in non-sleeping (atomic) context */
55 spinlock_t lock;
56 struct list_head list;
57
58 const struct qed_ll2_cb_ops *cbs;
59 void *cb_cookie;
60};
61
62struct qed_ll2_buffer {
63 struct list_head list;
64 void *data;
65 dma_addr_t phys_addr;
66};
67
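/* Tx completion handler for skbs sent through the LL2 interface: unmap the
 * first fragment, invoke the registered tx_cb (if any) and free the skb.
 */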
68static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
69 u8 connection_handle,
70 void *cookie,
71 dma_addr_t first_frag_addr,
72 bool b_last_fragment,
73 bool b_last_packet)
74{
75 struct qed_dev *cdev = p_hwfn->cdev;
76 struct sk_buff *skb = cookie;
77
78 /* All we need to do is release the mapping */
79 dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
80 skb_headlen(skb), DMA_TO_DEVICE);
81
82 if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
83 cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
84 b_last_fragment);
85
86 if (cdev->ll2->frags_mapped)
 87 /* Case where mapped frags were received; the skb must be
 88 * freed with nr_frags marked as 0
 89 */
90 skb_shinfo(skb)->nr_frags = 0;
91
92 dev_kfree_skb_any(skb);
93}
94
95static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
96 u8 **data, dma_addr_t *phys_addr)
97{
98 *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
99 if (!(*data)) {
100 DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
101 return -ENOMEM;
102 }
103
104 *phys_addr = dma_map_single(&cdev->pdev->dev,
105 ((*data) + NET_SKB_PAD),
106 cdev->ll2->rx_size, DMA_FROM_DEVICE);
107 if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
108 DP_INFO(cdev, "Failed to map LL2 buffer data\n");
109 kfree((*data));
110 return -ENOMEM;
111 }
112
113 return 0;
114}
115
116static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
117 struct qed_ll2_buffer *buffer)
118{
119 spin_lock_bh(&cdev->ll2->lock);
120
121 dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
122 cdev->ll2->rx_size, DMA_FROM_DEVICE);
123 kfree(buffer->data);
124 list_del(&buffer->list);
125
126 cdev->ll2->rx_cnt--;
127 if (!cdev->ll2->rx_cnt)
128 DP_INFO(cdev, "All LL2 entries were removed\n");
129
130 spin_unlock_bh(&cdev->ll2->lock);
131
132 return 0;
133}
134
135static void qed_ll2_kill_buffers(struct qed_dev *cdev)
136{
137 struct qed_ll2_buffer *buffer, *tmp_buffer;
138
139 list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
140 qed_ll2_dealloc_buffer(cdev, buffer);
141}
142
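/* Rx completion handler for the LL2 interface: wrap the completed buffer in
 * an skb, hand it to the registered rx_cb and repost a buffer (a fresh one,
 * or the same one again for runt packets or on allocation failure) to FW.
 */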
143void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
144 u8 connection_handle,
145 struct qed_ll2_rx_packet *p_pkt,
146 struct core_rx_fast_path_cqe *p_cqe,
147 bool b_last_packet)
148{
149 u16 packet_length = le16_to_cpu(p_cqe->packet_length);
150 struct qed_ll2_buffer *buffer = p_pkt->cookie;
151 struct qed_dev *cdev = p_hwfn->cdev;
152 u16 vlan = le16_to_cpu(p_cqe->vlan);
153 u32 opaque_data_0, opaque_data_1;
154 u8 pad = p_cqe->placement_offset;
155 dma_addr_t new_phys_addr;
156 struct sk_buff *skb;
157 bool reuse = false;
158 int rc = -EINVAL;
159 u8 *new_data;
160
161 opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
162 opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);
163
164 DP_VERBOSE(p_hwfn,
165 (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
166 "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
167 (u64)p_pkt->rx_buf_addr, pad, packet_length,
168 le16_to_cpu(p_cqe->parse_flags.flags), vlan,
169 opaque_data_0, opaque_data_1);
170
171 if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
172 print_hex_dump(KERN_INFO, "",
173 DUMP_PREFIX_OFFSET, 16, 1,
174 buffer->data, packet_length, false);
175 }
176
177 /* Determine if data is valid */
178 if (packet_length < ETH_HLEN)
179 reuse = true;
180
181 /* Allocate a replacement for buffer; Reuse upon failure */
182 if (!reuse)
183 rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
184 &new_phys_addr);
185
186 /* If need to reuse or there's no replacement buffer, repost this */
187 if (rc)
188 goto out_post;
189
190 skb = build_skb(buffer->data, 0);
191 if (!skb) {
192 rc = -ENOMEM;
193 goto out_post;
194 }
195
196 pad += NET_SKB_PAD;
197 skb_reserve(skb, pad);
198 skb_put(skb, packet_length);
199 skb_checksum_none_assert(skb);
200
 201 /* Get partial Ethernet information instead of eth_type_trans(),
 202 * since we don't have an associated net_device.
 203 */
204 skb_reset_mac_header(skb);
205 skb->protocol = eth_hdr(skb)->h_proto;
206
207 /* Pass SKB onward */
208 if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
209 if (vlan)
210 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
211 cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
212 opaque_data_0, opaque_data_1);
213 }
214
215 /* Update Buffer information and update FW producer */
216 buffer->data = new_data;
217 buffer->phys_addr = new_phys_addr;
218
219out_post:
220 rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
221 buffer->phys_addr, 0, buffer, 1);
222
223 if (rc)
224 qed_ll2_dealloc_buffer(cdev, buffer);
225}
226
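/* Translate a connection handle into its qed_ll2_info, optionally under the
 * connection mutex and optionally only if the connection is active.
 */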
227static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
228 u8 connection_handle,
229 bool b_lock,
230 bool b_only_active)
231{
232 struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;
233
234 if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
235 return NULL;
236
237 if (!p_hwfn->p_ll2_info)
238 return NULL;
239
240 p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
241
242 if (b_only_active) {
243 if (b_lock)
244 mutex_lock(&p_ll2_conn->mutex);
245 if (p_ll2_conn->b_active)
246 p_ret = p_ll2_conn;
247 if (b_lock)
248 mutex_unlock(&p_ll2_conn->mutex);
249 } else {
250 p_ret = p_ll2_conn;
251 }
252
253 return p_ret;
254}
255
256static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
257 u8 connection_handle)
258{
259 return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
260}
261
262static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
263 u8 connection_handle)
264{
265 return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
266}
267
268static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
269 *p_hwfn,
270 u8 connection_handle)
271{
272 return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
273}
274
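/* Drain Tx packets still on the active list of a connection being torn down,
 * completing each one toward its owner.
 */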
275static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
276{
277 bool b_last_packet = false, b_last_frag = false;
278 struct qed_ll2_tx_packet *p_pkt = NULL;
279 struct qed_ll2_info *p_ll2_conn;
280 struct qed_ll2_tx_queue *p_tx;
 281 dma_addr_t tx_frag;
 282
283 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
284 if (!p_ll2_conn)
285 return;
286
287 p_tx = &p_ll2_conn->tx_queue;
288
289 while (!list_empty(&p_tx->active_descq)) {
290 p_pkt = list_first_entry(&p_tx->active_descq,
291 struct qed_ll2_tx_packet, list_entry);
292 if (!p_pkt)
293 break;
294
295 list_del(&p_pkt->list_entry);
296 b_last_packet = list_empty(&p_tx->active_descq);
297 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
298 p_tx->cur_completing_packet = *p_pkt;
299 p_tx->cur_completing_bd_idx = 1;
300 b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
301 tx_frag = p_pkt->bds_set[0].tx_frag;
302 if (p_ll2_conn->gsi_enable)
303 qed_ll2b_release_tx_gsi_packet(p_hwfn,
304 p_ll2_conn->my_id,
305 p_pkt->cookie,
306 tx_frag,
307 b_last_frag,
308 b_last_packet);
309 else
310 qed_ll2b_complete_tx_packet(p_hwfn,
311 p_ll2_conn->my_id,
312 p_pkt->cookie,
313 tx_frag,
314 b_last_frag,
315 b_last_packet);
 316
 317 }
318}
319
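/* Tx status-block callback: walk from the driver's consumer up to the
 * firmware consumer index, consume the BDs of each completed packet and
 * complete it with the queue lock temporarily dropped.
 */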
320static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
321{
322 struct qed_ll2_info *p_ll2_conn = p_cookie;
323 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
324 u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
325 struct qed_ll2_tx_packet *p_pkt;
326 bool b_last_frag = false;
327 unsigned long flags;
 328 dma_addr_t tx_frag;
329 int rc = -EINVAL;
330
331 spin_lock_irqsave(&p_tx->lock, flags);
332 if (p_tx->b_completing_packet) {
333 rc = -EBUSY;
334 goto out;
335 }
336
337 new_idx = le16_to_cpu(*p_tx->p_fw_cons);
338 num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
339 while (num_bds) {
340 if (list_empty(&p_tx->active_descq))
341 goto out;
342
343 p_pkt = list_first_entry(&p_tx->active_descq,
344 struct qed_ll2_tx_packet, list_entry);
345 if (!p_pkt)
346 goto out;
347
348 p_tx->b_completing_packet = true;
349 p_tx->cur_completing_packet = *p_pkt;
350 num_bds_in_packet = p_pkt->bd_used;
351 list_del(&p_pkt->list_entry);
352
353 if (num_bds < num_bds_in_packet) {
354 DP_NOTICE(p_hwfn,
355 "Rest of BDs does not cover whole packet\n");
356 goto out;
357 }
358
359 num_bds -= num_bds_in_packet;
360 p_tx->bds_idx += num_bds_in_packet;
361 while (num_bds_in_packet--)
362 qed_chain_consume(&p_tx->txq_chain);
363
364 p_tx->cur_completing_bd_idx = 1;
365 b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
366 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
367
368 spin_unlock_irqrestore(&p_tx->lock, flags);
369 tx_frag = p_pkt->bds_set[0].tx_frag;
370 if (p_ll2_conn->gsi_enable)
371 qed_ll2b_complete_tx_gsi_packet(p_hwfn,
372 p_ll2_conn->my_id,
373 p_pkt->cookie,
374 tx_frag,
375 b_last_frag, !num_bds);
376 else
377 qed_ll2b_complete_tx_packet(p_hwfn,
378 p_ll2_conn->my_id,
379 p_pkt->cookie,
380 tx_frag,
381 b_last_frag, !num_bds);
382 spin_lock_irqsave(&p_tx->lock, flags);
383 }
384
385 p_tx->b_completing_packet = false;
386 rc = 0;
387out:
388 spin_unlock_irqrestore(&p_tx->lock, flags);
389 return rc;
390}
391
392static int
393qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
394 struct qed_ll2_info *p_ll2_info,
395 union core_rx_cqe_union *p_cqe,
396 unsigned long lock_flags, bool b_last_cqe)
397{
398 struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
399 struct qed_ll2_rx_packet *p_pkt = NULL;
400 u16 packet_length, parse_flags, vlan;
401 u32 src_mac_addrhi;
402 u16 src_mac_addrlo;
403
404 if (!list_empty(&p_rx->active_descq))
405 p_pkt = list_first_entry(&p_rx->active_descq,
406 struct qed_ll2_rx_packet, list_entry);
407 if (!p_pkt) {
408 DP_NOTICE(p_hwfn,
409 "GSI Rx completion but active_descq is empty\n");
410 return -EIO;
411 }
412
413 list_del(&p_pkt->list_entry);
414 parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
415 packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
416 vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
417 src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
418 src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
419 if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
420 DP_NOTICE(p_hwfn,
421 "Mismatch between active_descq and the LL2 Rx chain\n");
422 list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
423
424 spin_unlock_irqrestore(&p_rx->lock, lock_flags);
425 qed_ll2b_complete_rx_gsi_packet(p_hwfn,
426 p_ll2_info->my_id,
427 p_pkt->cookie,
428 p_pkt->rx_buf_addr,
429 packet_length,
430 p_cqe->rx_cqe_gsi.data_length_error,
431 parse_flags,
432 vlan,
433 src_mac_addrhi,
434 src_mac_addrlo, b_last_cqe);
435 spin_lock_irqsave(&p_rx->lock, lock_flags);
436
437 return 0;
438}
439
440static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
441 struct qed_ll2_info *p_ll2_conn,
442 union core_rx_cqe_union *p_cqe,
443 unsigned long lock_flags,
444 bool b_last_cqe)
445{
446 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
447 struct qed_ll2_rx_packet *p_pkt = NULL;
448
449 if (!list_empty(&p_rx->active_descq))
450 p_pkt = list_first_entry(&p_rx->active_descq,
451 struct qed_ll2_rx_packet, list_entry);
452 if (!p_pkt) {
453 DP_NOTICE(p_hwfn,
454 "LL2 Rx completion but active_descq is empty\n");
455 return -EIO;
456 }
457 list_del(&p_pkt->list_entry);
458
459 if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
460 DP_NOTICE(p_hwfn,
461 "Mismatch between active_descq and the LL2 Rx chain\n");
462 list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
463
464 spin_unlock_irqrestore(&p_rx->lock, lock_flags);
465 qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
466 p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
467 spin_lock_irqsave(&p_rx->lock, lock_flags);
468
469 return 0;
470}
471
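/* Rx status-block callback: consume RCQ entries up to the firmware consumer
 * and dispatch each CQE to the GSI or regular completion handler.
 */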
472static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
473{
474 struct qed_ll2_info *p_ll2_conn = cookie;
475 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
476 union core_rx_cqe_union *cqe = NULL;
477 u16 cq_new_idx = 0, cq_old_idx = 0;
478 unsigned long flags = 0;
479 int rc = 0;
480
481 spin_lock_irqsave(&p_rx->lock, flags);
482 cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
483 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
484
485 while (cq_new_idx != cq_old_idx) {
486 bool b_last_cqe = (cq_new_idx == cq_old_idx);
487
488 cqe = qed_chain_consume(&p_rx->rcq_chain);
489 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
490
491 DP_VERBOSE(p_hwfn,
492 QED_MSG_LL2,
493 "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
494 cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
495
496 switch (cqe->rx_cqe_sp.type) {
497 case CORE_RX_CQE_TYPE_SLOW_PATH:
498 DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
499 rc = -EINVAL;
500 break;
501 case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
502 rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
503 cqe, flags, b_last_cqe);
504 break;
505 case CORE_RX_CQE_TYPE_REGULAR:
506 rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
507 cqe, flags, b_last_cqe);
508 break;
509 default:
510 rc = -EIO;
511 }
512 }
513
514 spin_unlock_irqrestore(&p_rx->lock, flags);
515 return rc;
516}
517
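/* Return Rx descriptors left on the active list to the free list when the
 * connection is torn down.
 */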
518void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
519{
520 struct qed_ll2_info *p_ll2_conn = NULL;
521 struct qed_ll2_rx_packet *p_pkt = NULL;
522 struct qed_ll2_rx_queue *p_rx;
523
524 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
525 if (!p_ll2_conn)
526 return;
527
528 p_rx = &p_ll2_conn->rx_queue;
529
530 while (!list_empty(&p_rx->active_descq)) {
531 dma_addr_t rx_buf_addr;
532 void *cookie;
533 bool b_last;
534
535 p_pkt = list_first_entry(&p_rx->active_descq,
536 struct qed_ll2_rx_packet, list_entry);
537 if (!p_pkt)
538 break;
539
540 list_del(&p_pkt->list_entry);
541 list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
542
543 rx_buf_addr = p_pkt->rx_buf_addr;
544 cookie = p_pkt->cookie;
545
546 b_last = list_empty(&p_rx->active_descq);
547 }
548}
549
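/* Send the CORE_RAMROD_RX_QUEUE_START ramrod, describing the Rx BD chain,
 * the RCQ PBL, the MTU and the error policy of the connection to firmware.
 */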
550static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
551 struct qed_ll2_info *p_ll2_conn,
552 u8 action_on_error)
553{
554 enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
555 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
556 struct core_rx_start_ramrod_data *p_ramrod = NULL;
557 struct qed_spq_entry *p_ent = NULL;
558 struct qed_sp_init_data init_data;
559 u16 cqe_pbl_size;
560 int rc = 0;
561
562 /* Get SPQ entry */
563 memset(&init_data, 0, sizeof(init_data));
564 init_data.cid = p_ll2_conn->cid;
565 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
566 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
567
568 rc = qed_sp_init_request(p_hwfn, &p_ent,
569 CORE_RAMROD_RX_QUEUE_START,
570 PROTOCOLID_CORE, &init_data);
571 if (rc)
572 return rc;
573
574 p_ramrod = &p_ent->ramrod.core_rx_queue_start;
575
576 p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
577 p_ramrod->sb_index = p_rx->rx_sb_index;
578 p_ramrod->complete_event_flg = 1;
579
580 p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
581 DMA_REGPAIR_LE(p_ramrod->bd_base,
582 p_rx->rxq_chain.p_phys_addr);
583 cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
584 p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
585 DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
586 qed_chain_get_pbl_phys(&p_rx->rcq_chain));
587
588 p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
589 p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
590 p_ramrod->queue_id = p_ll2_conn->queue_id;
591 p_ramrod->main_func_queue = 1;
592
593 if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
594 p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
595 p_ramrod->mf_si_bcast_accept_all = 1;
596 p_ramrod->mf_si_mcast_accept_all = 1;
597 } else {
598 p_ramrod->mf_si_bcast_accept_all = 0;
599 p_ramrod->mf_si_mcast_accept_all = 0;
600 }
601
602 p_ramrod->action_on_error.error_type = action_on_error;
 603 p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
604 return qed_spq_post(p_hwfn, p_ent, NULL);
605}
606
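/* Send the CORE_RAMROD_TX_QUEUE_START ramrod with the Tx PBL address, the
 * physical queue (PQ) and the protocol type of the connection.
 */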
607static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
608 struct qed_ll2_info *p_ll2_conn)
609{
610 enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
611 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
612 struct core_tx_start_ramrod_data *p_ramrod = NULL;
613 struct qed_spq_entry *p_ent = NULL;
614 struct qed_sp_init_data init_data;
615 union qed_qm_pq_params pq_params;
616 u16 pq_id = 0, pbl_size;
617 int rc = -EINVAL;
618
619 if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
620 return 0;
621
622 /* Get SPQ entry */
623 memset(&init_data, 0, sizeof(init_data));
624 init_data.cid = p_ll2_conn->cid;
625 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
626 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
627
628 rc = qed_sp_init_request(p_hwfn, &p_ent,
629 CORE_RAMROD_TX_QUEUE_START,
630 PROTOCOLID_CORE, &init_data);
631 if (rc)
632 return rc;
633
634 p_ramrod = &p_ent->ramrod.core_tx_queue_start;
635
636 p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
637 p_ramrod->sb_index = p_tx->tx_sb_index;
638 p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
639 p_ll2_conn->tx_stats_en = 1;
640 p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
641 p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
642
643 DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
644 qed_chain_get_pbl_phys(&p_tx->txq_chain));
645 pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
646 p_ramrod->pbl_size = cpu_to_le16(pbl_size);
647
648 memset(&pq_params, 0, sizeof(pq_params));
649 pq_params.core.tc = p_ll2_conn->tx_tc;
650 pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
651 p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
652
653 switch (conn_type) {
654 case QED_LL2_TYPE_ISCSI:
655 case QED_LL2_TYPE_ISCSI_OOO:
656 p_ramrod->conn_type = PROTOCOLID_ISCSI;
657 break;
658 case QED_LL2_TYPE_ROCE:
659 p_ramrod->conn_type = PROTOCOLID_ROCE;
660 break;
661 default:
662 p_ramrod->conn_type = PROTOCOLID_ETH;
663 DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
664 }
665
 666 p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
667 return qed_spq_post(p_hwfn, p_ent, NULL);
668}
669
670static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
671 struct qed_ll2_info *p_ll2_conn)
672{
673 struct core_rx_stop_ramrod_data *p_ramrod = NULL;
674 struct qed_spq_entry *p_ent = NULL;
675 struct qed_sp_init_data init_data;
676 int rc = -EINVAL;
677
678 /* Get SPQ entry */
679 memset(&init_data, 0, sizeof(init_data));
680 init_data.cid = p_ll2_conn->cid;
681 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
682 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
683
684 rc = qed_sp_init_request(p_hwfn, &p_ent,
685 CORE_RAMROD_RX_QUEUE_STOP,
686 PROTOCOLID_CORE, &init_data);
687 if (rc)
688 return rc;
689
690 p_ramrod = &p_ent->ramrod.core_rx_queue_stop;
691
692 p_ramrod->complete_event_flg = 1;
693 p_ramrod->queue_id = p_ll2_conn->queue_id;
694
695 return qed_spq_post(p_hwfn, p_ent, NULL);
696}
697
698static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
699 struct qed_ll2_info *p_ll2_conn)
700{
701 struct qed_spq_entry *p_ent = NULL;
702 struct qed_sp_init_data init_data;
703 int rc = -EINVAL;
704
705 /* Get SPQ entry */
706 memset(&init_data, 0, sizeof(init_data));
707 init_data.cid = p_ll2_conn->cid;
708 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
709 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
710
711 rc = qed_sp_init_request(p_hwfn, &p_ent,
712 CORE_RAMROD_TX_QUEUE_STOP,
713 PROTOCOLID_CORE, &init_data);
714 if (rc)
715 return rc;
716
717 return qed_spq_post(p_hwfn, p_ent, NULL);
718}
719
720static int
721qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
722 struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
723{
724 struct qed_ll2_rx_packet *p_descq;
725 u32 capacity;
726 int rc = 0;
727
728 if (!rx_num_desc)
729 goto out;
730
731 rc = qed_chain_alloc(p_hwfn->cdev,
732 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
733 QED_CHAIN_MODE_NEXT_PTR,
734 QED_CHAIN_CNT_TYPE_U16,
735 rx_num_desc,
736 sizeof(struct core_rx_bd),
737 &p_ll2_info->rx_queue.rxq_chain);
738 if (rc) {
739 DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
740 goto out;
741 }
742
743 capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
744 p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
745 GFP_KERNEL);
746 if (!p_descq) {
747 rc = -ENOMEM;
748 DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
749 goto out;
750 }
751 p_ll2_info->rx_queue.descq_array = p_descq;
752
753 rc = qed_chain_alloc(p_hwfn->cdev,
754 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
755 QED_CHAIN_MODE_PBL,
756 QED_CHAIN_CNT_TYPE_U16,
757 rx_num_desc,
758 sizeof(struct core_rx_fast_path_cqe),
759 &p_ll2_info->rx_queue.rcq_chain);
760 if (rc) {
761 DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
762 goto out;
763 }
764
765 DP_VERBOSE(p_hwfn, QED_MSG_LL2,
766 "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
767 p_ll2_info->conn_type, rx_num_desc);
768
769out:
770 return rc;
771}
772
773static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
774 struct qed_ll2_info *p_ll2_info,
775 u16 tx_num_desc)
776{
777 struct qed_ll2_tx_packet *p_descq;
778 u32 capacity;
779 int rc = 0;
780
781 if (!tx_num_desc)
782 goto out;
783
784 rc = qed_chain_alloc(p_hwfn->cdev,
785 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
786 QED_CHAIN_MODE_PBL,
787 QED_CHAIN_CNT_TYPE_U16,
788 tx_num_desc,
789 sizeof(struct core_tx_bd),
790 &p_ll2_info->tx_queue.txq_chain);
791 if (rc)
792 goto out;
793
794 capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
795 p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
796 GFP_KERNEL);
797 if (!p_descq) {
798 rc = -ENOMEM;
799 goto out;
800 }
801 p_ll2_info->tx_queue.descq_array = p_descq;
802
803 DP_VERBOSE(p_hwfn, QED_MSG_LL2,
804 "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
805 p_ll2_info->conn_type, tx_num_desc);
806
807out:
808 if (rc)
809 DP_NOTICE(p_hwfn,
810 "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
811 tx_num_desc);
812 return rc;
813}
814
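/* Reserve a free LL2 connection slot, copy the caller's parameters into it,
 * allocate its Rx/Tx chains and register the SB completion callbacks.
 *
 * Typical usage (illustrative sketch only; 'params' and 'handle' are
 * hypothetical local variables of the caller):
 *
 *	qed_ll2_acquire_connection(p_hwfn, &params, rx_num, tx_num, &handle);
 *	qed_ll2_establish_connection(p_hwfn, handle);
 *	qed_ll2_post_rx_buffer(p_hwfn, handle, addr, len, cookie, 1);
 *	...
 *	qed_ll2_terminate_connection(p_hwfn, handle);
 *	qed_ll2_release_connection(p_hwfn, handle);
 */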
815int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
816 struct qed_ll2_info *p_params,
817 u16 rx_num_desc,
818 u16 tx_num_desc,
819 u8 *p_connection_handle)
820{
821 qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
822 struct qed_ll2_info *p_ll2_info = NULL;
823 int rc;
824 u8 i;
825
826 if (!p_connection_handle || !p_hwfn->p_ll2_info)
827 return -EINVAL;
828
829 /* Find a free connection to be used */
830 for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
831 mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
832 if (p_hwfn->p_ll2_info[i].b_active) {
833 mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
834 continue;
835 }
836
837 p_hwfn->p_ll2_info[i].b_active = true;
838 p_ll2_info = &p_hwfn->p_ll2_info[i];
839 mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
840 break;
841 }
842 if (!p_ll2_info)
843 return -EBUSY;
844
845 p_ll2_info->conn_type = p_params->conn_type;
846 p_ll2_info->mtu = p_params->mtu;
847 p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
848 p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
849 p_ll2_info->tx_tc = p_params->tx_tc;
850 p_ll2_info->tx_dest = p_params->tx_dest;
851 p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
852 p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
 853 p_ll2_info->gsi_enable = p_params->gsi_enable;
854
855 rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
856 if (rc)
857 goto q_allocate_fail;
858
859 rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
860 if (rc)
861 goto q_allocate_fail;
862
863 /* Register callbacks for the Rx/Tx queues */
864 comp_rx_cb = qed_ll2_rxq_completion;
865 comp_tx_cb = qed_ll2_txq_completion;
866
867 if (rx_num_desc) {
868 qed_int_register_cb(p_hwfn, comp_rx_cb,
869 &p_hwfn->p_ll2_info[i],
870 &p_ll2_info->rx_queue.rx_sb_index,
871 &p_ll2_info->rx_queue.p_fw_cons);
872 p_ll2_info->rx_queue.b_cb_registred = true;
873 }
874
875 if (tx_num_desc) {
876 qed_int_register_cb(p_hwfn,
877 comp_tx_cb,
878 &p_hwfn->p_ll2_info[i],
879 &p_ll2_info->tx_queue.tx_sb_index,
880 &p_ll2_info->tx_queue.p_fw_cons);
881 p_ll2_info->tx_queue.b_cb_registred = true;
882 }
883
884 *p_connection_handle = i;
885 return rc;
886
887q_allocate_fail:
888 qed_ll2_release_connection(p_hwfn, i);
889 return -ENOMEM;
890}
891
892static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
893 struct qed_ll2_info *p_ll2_conn)
894{
895 u8 action_on_error = 0;
896
897 if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
898 return 0;
899
900 DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
901
902 SET_FIELD(action_on_error,
903 CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
904 p_ll2_conn->ai_err_packet_too_big);
905 SET_FIELD(action_on_error,
906 CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf);
907
908 return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
909}
910
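/* Reset the Rx/Tx chains of an acquired connection, acquire a CID, compute
 * the Rx producer and Tx doorbell addresses and send the queue-start ramrods.
 */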
911int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
912{
913 struct qed_ll2_info *p_ll2_conn;
914 struct qed_ll2_rx_queue *p_rx;
915 struct qed_ll2_tx_queue *p_tx;
916 int rc = -EINVAL;
917 u32 i, capacity;
918 u8 qid;
919
920 p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
921 if (!p_ll2_conn)
922 return -EINVAL;
923 p_rx = &p_ll2_conn->rx_queue;
924 p_tx = &p_ll2_conn->tx_queue;
925
926 qed_chain_reset(&p_rx->rxq_chain);
927 qed_chain_reset(&p_rx->rcq_chain);
928 INIT_LIST_HEAD(&p_rx->active_descq);
929 INIT_LIST_HEAD(&p_rx->free_descq);
930 INIT_LIST_HEAD(&p_rx->posting_descq);
931 spin_lock_init(&p_rx->lock);
932 capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
933 for (i = 0; i < capacity; i++)
934 list_add_tail(&p_rx->descq_array[i].list_entry,
935 &p_rx->free_descq);
936 *p_rx->p_fw_cons = 0;
937
938 qed_chain_reset(&p_tx->txq_chain);
939 INIT_LIST_HEAD(&p_tx->active_descq);
940 INIT_LIST_HEAD(&p_tx->free_descq);
941 INIT_LIST_HEAD(&p_tx->sending_descq);
942 spin_lock_init(&p_tx->lock);
943 capacity = qed_chain_get_capacity(&p_tx->txq_chain);
944 for (i = 0; i < capacity; i++)
945 list_add_tail(&p_tx->descq_array[i].list_entry,
946 &p_tx->free_descq);
947 p_tx->cur_completing_bd_idx = 0;
948 p_tx->bds_idx = 0;
949 p_tx->b_completing_packet = false;
950 p_tx->cur_send_packet = NULL;
951 p_tx->cur_send_frag_num = 0;
952 p_tx->cur_completing_frag_num = 0;
953 *p_tx->p_fw_cons = 0;
954
955 qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
956
957 qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
958 p_ll2_conn->queue_id = qid;
959 p_ll2_conn->tx_stats_id = qid;
960 p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
961 GTT_BAR0_MAP_REG_TSDM_RAM +
962 TSTORM_LL2_RX_PRODS_OFFSET(qid);
963 p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
964 qed_db_addr(p_ll2_conn->cid,
965 DQ_DEMS_LEGACY);
966
967 rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
968 if (rc)
969 return rc;
970
971 rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
972 if (rc)
973 return rc;
974
975 if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
976 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
977
978 return rc;
979}
980
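/* Move buffers queued on posting_descq (plus the optionally supplied packet)
 * to the active list and publish the new BD/CQE producers to the firmware.
 */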
981static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
982 struct qed_ll2_rx_queue *p_rx,
983 struct qed_ll2_rx_packet *p_curp)
984{
985 struct qed_ll2_rx_packet *p_posting_packet = NULL;
986 struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
987 bool b_notify_fw = false;
988 u16 bd_prod, cq_prod;
989
990 /* This handles the flushing of already posted buffers */
991 while (!list_empty(&p_rx->posting_descq)) {
992 p_posting_packet = list_first_entry(&p_rx->posting_descq,
993 struct qed_ll2_rx_packet,
994 list_entry);
995 list_del(&p_posting_packet->list_entry);
996 list_add_tail(&p_posting_packet->list_entry,
997 &p_rx->active_descq);
998 b_notify_fw = true;
999 }
1000
1001 /* This handles the supplied packet [if there is one] */
1002 if (p_curp) {
1003 list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
1004 b_notify_fw = true;
1005 }
1006
1007 if (!b_notify_fw)
1008 return;
1009
1010 bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
1011 cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
1012 rx_prod.bd_prod = cpu_to_le16(bd_prod);
1013 rx_prod.cqe_prod = cpu_to_le16(cq_prod);
1014 DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
1015}
1016
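/* Hand a single Rx buffer to the connection. With notify_fw set the producers
 * are updated immediately; otherwise the buffer is kept on posting_descq
 * until a later call flushes it to the firmware.
 */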
1017int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
1018 u8 connection_handle,
1019 dma_addr_t addr,
1020 u16 buf_len, void *cookie, u8 notify_fw)
1021{
1022 struct core_rx_bd_with_buff_len *p_curb = NULL;
1023 struct qed_ll2_rx_packet *p_curp = NULL;
1024 struct qed_ll2_info *p_ll2_conn;
1025 struct qed_ll2_rx_queue *p_rx;
1026 unsigned long flags;
1027 void *p_data;
1028 int rc = 0;
1029
1030 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1031 if (!p_ll2_conn)
1032 return -EINVAL;
1033 p_rx = &p_ll2_conn->rx_queue;
1034
1035 spin_lock_irqsave(&p_rx->lock, flags);
1036 if (!list_empty(&p_rx->free_descq))
1037 p_curp = list_first_entry(&p_rx->free_descq,
1038 struct qed_ll2_rx_packet, list_entry);
1039 if (p_curp) {
1040 if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
1041 qed_chain_get_elem_left(&p_rx->rcq_chain)) {
1042 p_data = qed_chain_produce(&p_rx->rxq_chain);
1043 p_curb = (struct core_rx_bd_with_buff_len *)p_data;
1044 qed_chain_produce(&p_rx->rcq_chain);
1045 }
1046 }
1047
 1048 /* If we're lacking entries, let's try to flush buffers to FW */
1049 if (!p_curp || !p_curb) {
1050 rc = -EBUSY;
1051 p_curp = NULL;
1052 goto out_notify;
1053 }
1054
1055 /* We have an Rx packet we can fill */
1056 DMA_REGPAIR_LE(p_curb->addr, addr);
1057 p_curb->buff_length = cpu_to_le16(buf_len);
1058 p_curp->rx_buf_addr = addr;
1059 p_curp->cookie = cookie;
1060 p_curp->rxq_bd = p_curb;
1061 p_curp->buf_length = buf_len;
1062 list_del(&p_curp->list_entry);
1063
1064 /* Check if we only want to enqueue this packet without informing FW */
1065 if (!notify_fw) {
1066 list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
1067 goto out;
1068 }
1069
1070out_notify:
1071 qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
1072out:
1073 spin_unlock_irqrestore(&p_rx->lock, flags);
1074 return rc;
1075}
1076
1077static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
1078 struct qed_ll2_tx_queue *p_tx,
1079 struct qed_ll2_tx_packet *p_curp,
1080 u8 num_of_bds,
1081 dma_addr_t first_frag,
1082 u16 first_frag_len, void *p_cookie,
1083 u8 notify_fw)
1084{
1085 list_del(&p_curp->list_entry);
1086 p_curp->cookie = p_cookie;
1087 p_curp->bd_used = num_of_bds;
1088 p_curp->notify_fw = notify_fw;
1089 p_tx->cur_send_packet = p_curp;
1090 p_tx->cur_send_frag_num = 0;
1091
1092 p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
1093 p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
1094 p_tx->cur_send_frag_num++;
1095}
1096
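/* Build the first core_tx_bd (VLAN, flags, RoCE flavor, first fragment) and
 * produce zeroed BDs for any remaining fragments, which are filled in later
 * by qed_ll2_set_fragment_of_tx_packet().
 */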
1097static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1098 struct qed_ll2_info *p_ll2,
1099 struct qed_ll2_tx_packet *p_curp,
1100 u8 num_of_bds,
1101 enum core_tx_dest tx_dest,
1102 u16 vlan,
1103 u8 bd_flags,
1104 u16 l4_hdr_offset_w,
 1105 enum core_roce_flavor_type type,
1106 dma_addr_t first_frag,
1107 u16 first_frag_len)
1108{
1109 struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
1110 u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
1111 struct core_tx_bd *start_bd = NULL;
1112 u16 frag_idx;
1113
1114 start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1115 start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
1116 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
1117 cpu_to_le16(l4_hdr_offset_w));
1118 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
1119 start_bd->bd_flags.as_bitfield = bd_flags;
1120 start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
1121 CORE_TX_BD_FLAGS_START_BD_SHIFT;
1122 SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
1123 DMA_REGPAIR_LE(start_bd->addr, first_frag);
1124 start_bd->nbytes = cpu_to_le16(first_frag_len);
1125
1126 SET_FIELD(start_bd->bd_flags.as_bitfield, CORE_TX_BD_FLAGS_ROCE_FLAV,
1127 type);
1128
1129 DP_VERBOSE(p_hwfn,
1130 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1131 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
1132 p_ll2->queue_id,
1133 p_ll2->cid,
1134 p_ll2->conn_type,
1135 prod_idx,
1136 first_frag_len,
1137 num_of_bds,
1138 le32_to_cpu(start_bd->addr.hi),
1139 le32_to_cpu(start_bd->addr.lo));
1140
1141 if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
1142 return;
1143
1144 /* Need to provide the packet with additional BDs for frags */
1145 for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
1146 frag_idx < num_of_bds; frag_idx++) {
1147 struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
1148
1149 *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1150 (*p_bd)->bd_flags.as_bitfield = 0;
1151 (*p_bd)->bitfield1 = 0;
1152 (*p_bd)->bitfield0 = 0;
1153 p_curp->bds_set[frag_idx].tx_frag = 0;
1154 p_curp->bds_set[frag_idx].frag_len = 0;
1155 }
1156}
1157
1158/* This should be called while the Txq spinlock is being held */
1159static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
1160 struct qed_ll2_info *p_ll2_conn)
1161{
1162 bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
1163 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1164 struct qed_ll2_tx_packet *p_pkt = NULL;
1165 struct core_db_data db_msg = { 0, 0, 0 };
1166 u16 bd_prod;
1167
1168 /* If there are missing BDs, don't do anything now */
1169 if (p_ll2_conn->tx_queue.cur_send_frag_num !=
1170 p_ll2_conn->tx_queue.cur_send_packet->bd_used)
1171 return;
1172
1173 /* Push the current packet to the list and clean after it */
1174 list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
1175 &p_ll2_conn->tx_queue.sending_descq);
1176 p_ll2_conn->tx_queue.cur_send_packet = NULL;
1177 p_ll2_conn->tx_queue.cur_send_frag_num = 0;
1178
1179 /* Notify FW of packet only if requested to */
1180 if (!b_notify)
1181 return;
1182
1183 bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
1184
1185 while (!list_empty(&p_tx->sending_descq)) {
1186 p_pkt = list_first_entry(&p_tx->sending_descq,
1187 struct qed_ll2_tx_packet, list_entry);
1188 if (!p_pkt)
1189 break;
1190
1191 list_del(&p_pkt->list_entry);
1192 list_add_tail(&p_pkt->list_entry, &p_tx->active_descq);
1193 }
1194
1195 SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
1196 SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1197 SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
1198 DQ_XCM_CORE_TX_BD_PROD_CMD);
1199 db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
1200 db_msg.spq_prod = cpu_to_le16(bd_prod);
1201
1202 /* Make sure the BDs data is updated before ringing the doorbell */
1203 wmb();
1204
1205 DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
1206
1207 DP_VERBOSE(p_hwfn,
1208 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1209 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
1210 p_ll2_conn->queue_id,
1211 p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod);
1212}
1213
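/* Start a new Tx packet on the connection: claim a free descriptor, build its
 * first BD and, if requested, ring the doorbell once all of its BDs have been
 * provided (immediately when num_of_bds is 1).
 */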
1214int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
1215 u8 connection_handle,
1216 u8 num_of_bds,
1217 u16 vlan,
1218 u8 bd_flags,
1219 u16 l4_hdr_offset_w,
 1220 enum qed_ll2_roce_flavor_type qed_roce_flavor,
1221 dma_addr_t first_frag,
1222 u16 first_frag_len, void *cookie, u8 notify_fw)
1223{
1224 struct qed_ll2_tx_packet *p_curp = NULL;
1225 struct qed_ll2_info *p_ll2_conn = NULL;
 1226 enum core_roce_flavor_type roce_flavor;
1227 struct qed_ll2_tx_queue *p_tx;
1228 struct qed_chain *p_tx_chain;
1229 unsigned long flags;
1230 int rc = 0;
1231
1232 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1233 if (!p_ll2_conn)
1234 return -EINVAL;
1235 p_tx = &p_ll2_conn->tx_queue;
1236 p_tx_chain = &p_tx->txq_chain;
1237
1238 if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
1239 return -EIO;
1240
1241 spin_lock_irqsave(&p_tx->lock, flags);
1242 if (p_tx->cur_send_packet) {
1243 rc = -EEXIST;
1244 goto out;
1245 }
1246
1247 /* Get entry, but only if we have tx elements for it */
1248 if (!list_empty(&p_tx->free_descq))
1249 p_curp = list_first_entry(&p_tx->free_descq,
1250 struct qed_ll2_tx_packet, list_entry);
1251 if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
1252 p_curp = NULL;
1253
1254 if (!p_curp) {
1255 rc = -EBUSY;
1256 goto out;
1257 }
1258
1259 if (qed_roce_flavor == QED_LL2_ROCE) {
1260 roce_flavor = CORE_ROCE;
1261 } else if (qed_roce_flavor == QED_LL2_RROCE) {
1262 roce_flavor = CORE_RROCE;
1263 } else {
1264 rc = -EINVAL;
1265 goto out;
1266 }
1267
1268 /* Prepare packet and BD, and perhaps send a doorbell to FW */
1269 qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
1270 num_of_bds, first_frag,
1271 first_frag_len, cookie, notify_fw);
1272 qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
1273 num_of_bds, CORE_TX_DEST_NW,
1274 vlan, bd_flags, l4_hdr_offset_w,
 1275 roce_flavor,
1276 first_frag, first_frag_len);
1277
1278 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1279
1280out:
1281 spin_unlock_irqrestore(&p_tx->lock, flags);
1282 return rc;
1283}
1284
1285int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
1286 u8 connection_handle,
1287 dma_addr_t addr, u16 nbytes)
1288{
1289 struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
1290 struct qed_ll2_info *p_ll2_conn = NULL;
1291 u16 cur_send_frag_num = 0;
1292 struct core_tx_bd *p_bd;
1293 unsigned long flags;
1294
1295 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1296 if (!p_ll2_conn)
1297 return -EINVAL;
1298
1299 if (!p_ll2_conn->tx_queue.cur_send_packet)
1300 return -EINVAL;
1301
1302 p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
1303 cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
1304
1305 if (cur_send_frag_num >= p_cur_send_packet->bd_used)
1306 return -EINVAL;
1307
1308 /* Fill the BD information, and possibly notify FW */
1309 p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
1310 DMA_REGPAIR_LE(p_bd->addr, addr);
1311 p_bd->nbytes = cpu_to_le16(nbytes);
1312 p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
1313 p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
1314
1315 p_ll2_conn->tx_queue.cur_send_frag_num++;
1316
1317 spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
1318 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1319 spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
1320
1321 return 0;
1322}
1323
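/* Stop the Tx and Rx queues of an established connection via their stop
 * ramrods and flush any descriptors still in flight.
 */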
1324int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1325{
1326 struct qed_ll2_info *p_ll2_conn = NULL;
1327 int rc = -EINVAL;
1328
1329 p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
1330 if (!p_ll2_conn)
1331 return -EINVAL;
1332
1333 /* Stop Tx & Rx of connection, if needed */
1334 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1335 rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
1336 if (rc)
1337 return rc;
1338 qed_ll2_txq_flush(p_hwfn, connection_handle);
1339 }
1340
1341 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1342 rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
1343 if (rc)
1344 return rc;
1345 qed_ll2_rxq_flush(p_hwfn, connection_handle);
1346 }
1347
1348 return rc;
1349}
1350
1351void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1352{
1353 struct qed_ll2_info *p_ll2_conn = NULL;
1354
1355 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1356 if (!p_ll2_conn)
1357 return;
1358
1359 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1360 p_ll2_conn->rx_queue.b_cb_registred = false;
1361 qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
1362 }
1363
1364 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1365 p_ll2_conn->tx_queue.b_cb_registred = false;
1366 qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
1367 }
1368
1369 kfree(p_ll2_conn->tx_queue.descq_array);
1370 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
1371
1372 kfree(p_ll2_conn->rx_queue.descq_array);
1373 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
1374 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
1375
1376 qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
1377
1378 mutex_lock(&p_ll2_conn->mutex);
1379 p_ll2_conn->b_active = false;
1380 mutex_unlock(&p_ll2_conn->mutex);
1381}
1382
1383struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
1384{
1385 struct qed_ll2_info *p_ll2_connections;
1386 u8 i;
1387
1388 /* Allocate LL2's set struct */
1389 p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
1390 sizeof(struct qed_ll2_info), GFP_KERNEL);
1391 if (!p_ll2_connections) {
1392 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
1393 return NULL;
1394 }
1395
1396 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1397 p_ll2_connections[i].my_id = i;
1398
1399 return p_ll2_connections;
1400}
1401
1402void qed_ll2_setup(struct qed_hwfn *p_hwfn,
1403 struct qed_ll2_info *p_ll2_connections)
1404{
1405 int i;
1406
1407 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1408 mutex_init(&p_ll2_connections[i].mutex);
1409}
1410
1411void qed_ll2_free(struct qed_hwfn *p_hwfn,
1412 struct qed_ll2_info *p_ll2_connections)
1413{
1414 kfree(p_ll2_connections);
1415}
1416
1417static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
1418 struct qed_ptt *p_ptt,
1419 struct qed_ll2_info *p_ll2_conn,
1420 struct qed_ll2_stats *p_stats)
1421{
1422 struct core_ll2_tstorm_per_queue_stat tstats;
1423 u8 qid = p_ll2_conn->queue_id;
1424 u32 tstats_addr;
1425
1426 memset(&tstats, 0, sizeof(tstats));
1427 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1428 CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
1429 qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
1430
1431 p_stats->packet_too_big_discard =
1432 HILO_64_REGPAIR(tstats.packet_too_big_discard);
1433 p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
1434}
1435
1436static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
1437 struct qed_ptt *p_ptt,
1438 struct qed_ll2_info *p_ll2_conn,
1439 struct qed_ll2_stats *p_stats)
1440{
1441 struct core_ll2_ustorm_per_queue_stat ustats;
1442 u8 qid = p_ll2_conn->queue_id;
1443 u32 ustats_addr;
1444
1445 memset(&ustats, 0, sizeof(ustats));
1446 ustats_addr = BAR0_MAP_REG_USDM_RAM +
1447 CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
1448 qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
1449
1450 p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1451 p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1452 p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1453 p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1454 p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1455 p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1456}
1457
1458static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
1459 struct qed_ptt *p_ptt,
1460 struct qed_ll2_info *p_ll2_conn,
1461 struct qed_ll2_stats *p_stats)
1462{
1463 struct core_ll2_pstorm_per_queue_stat pstats;
1464 u8 stats_id = p_ll2_conn->tx_stats_id;
1465 u32 pstats_addr;
1466
1467 memset(&pstats, 0, sizeof(pstats));
1468 pstats_addr = BAR0_MAP_REG_PSDM_RAM +
1469 CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
1470 qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
1471
1472 p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1473 p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1474 p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1475 p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1476 p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1477 p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1478}
1479
1480int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
1481 u8 connection_handle, struct qed_ll2_stats *p_stats)
1482{
1483 struct qed_ll2_info *p_ll2_conn = NULL;
1484 struct qed_ptt *p_ptt;
1485
1486 memset(p_stats, 0, sizeof(*p_stats));
1487
1488 if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
1489 !p_hwfn->p_ll2_info)
1490 return -EINVAL;
1491
1492 p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
1493
1494 p_ptt = qed_ptt_acquire(p_hwfn);
1495 if (!p_ptt) {
1496 DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1497 return -EINVAL;
1498 }
1499
1500 _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
1501 _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
1502 if (p_ll2_conn->tx_stats_en)
1503 _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
1504
1505 qed_ptt_release(p_hwfn, p_ptt);
1506 return 0;
1507}
1508
1509static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
1510 const struct qed_ll2_cb_ops *ops,
1511 void *cookie)
1512{
1513 cdev->ll2->cbs = ops;
1514 cdev->ll2->cb_cookie = cookie;
1515}
1516
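/* qed_ll2_ops 'start' implementation: allocate the Rx buffer pool, acquire
 * and establish an LL2 connection on the leading hwfn, post the buffers to
 * firmware and add an LLH MAC filter for the supplied address.
 */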
1517static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
1518{
1519 struct qed_ll2_info ll2_info;
1520 struct qed_ll2_buffer *buffer;
1521 enum qed_ll2_conn_type conn_type;
1522 struct qed_ptt *p_ptt;
1523 int rc, i;
1524
1525 /* Initialize LL2 locks & lists */
1526 INIT_LIST_HEAD(&cdev->ll2->list);
1527 spin_lock_init(&cdev->ll2->lock);
1528 cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
1529 L1_CACHE_BYTES + params->mtu;
1530 cdev->ll2->frags_mapped = params->frags_mapped;
1531
 1532 /* Allocate memory for LL2 */
1533 DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
1534 cdev->ll2->rx_size);
1535 for (i = 0; i < QED_LL2_RX_SIZE; i++) {
1536 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
1537 if (!buffer) {
1538 DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
1539 goto fail;
1540 }
1541
1542 rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
1543 &buffer->phys_addr);
1544 if (rc) {
1545 kfree(buffer);
1546 goto fail;
1547 }
1548
1549 list_add_tail(&buffer->list, &cdev->ll2->list);
1550 }
1551
1552 switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
1553 case QED_PCI_ISCSI:
1554 conn_type = QED_LL2_TYPE_ISCSI;
1555 break;
1556 case QED_PCI_ETH_ROCE:
1557 conn_type = QED_LL2_TYPE_ROCE;
1558 break;
1559 default:
1560 conn_type = QED_LL2_TYPE_TEST;
1561 }
1562
1563 /* Prepare the temporary ll2 information */
1564 memset(&ll2_info, 0, sizeof(ll2_info));
1565 ll2_info.conn_type = conn_type;
1566 ll2_info.mtu = params->mtu;
1567 ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
1568 ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
1569 ll2_info.tx_tc = 0;
1570 ll2_info.tx_dest = CORE_TX_DEST_NW;
 1571 ll2_info.gsi_enable = 1;
1572
1573 rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
1574 QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
1575 &cdev->ll2->handle);
1576 if (rc) {
1577 DP_INFO(cdev, "Failed to acquire LL2 connection\n");
1578 goto fail;
1579 }
1580
1581 rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
1582 cdev->ll2->handle);
1583 if (rc) {
1584 DP_INFO(cdev, "Failed to establish LL2 connection\n");
1585 goto release_fail;
1586 }
1587
1588 /* Post all Rx buffers to FW */
1589 spin_lock_bh(&cdev->ll2->lock);
1590 list_for_each_entry(buffer, &cdev->ll2->list, list) {
1591 rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
1592 cdev->ll2->handle,
1593 buffer->phys_addr, 0, buffer, 1);
1594 if (rc) {
1595 DP_INFO(cdev,
1596 "Failed to post an Rx buffer; Deleting it\n");
1597 dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
1598 cdev->ll2->rx_size, DMA_FROM_DEVICE);
1599 kfree(buffer->data);
1600 list_del(&buffer->list);
1601 kfree(buffer);
1602 } else {
1603 cdev->ll2->rx_cnt++;
1604 }
1605 }
1606 spin_unlock_bh(&cdev->ll2->lock);
1607
1608 if (!cdev->ll2->rx_cnt) {
1609 DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
1610 goto release_terminate;
1611 }
1612
1613 if (!is_valid_ether_addr(params->ll2_mac_address)) {
1614 DP_INFO(cdev, "Invalid Ethernet address\n");
1615 goto release_terminate;
1616 }
1617
1618 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
1619 if (!p_ptt) {
1620 DP_INFO(cdev, "Failed to acquire PTT\n");
1621 goto release_terminate;
1622 }
1623
1624 rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
1625 params->ll2_mac_address);
1626 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
1627 if (rc) {
1628 DP_ERR(cdev, "Failed to allocate LLH filter\n");
1629 goto release_terminate_all;
1630 }
1631
1632 ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
1633
1634 return 0;
1635
1636release_terminate_all:
1637
1638release_terminate:
1639 qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
1640release_fail:
1641 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
1642fail:
1643 qed_ll2_kill_buffers(cdev);
1644 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
1645 return -EINVAL;
1646}
1647
1648static int qed_ll2_stop(struct qed_dev *cdev)
1649{
1650 struct qed_ptt *p_ptt;
1651 int rc;
1652
1653 if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
1654 return 0;
1655
1656 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
1657 if (!p_ptt) {
1658 DP_INFO(cdev, "Failed to acquire PTT\n");
1659 goto fail;
1660 }
1661
1662 qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
1663 cdev->ll2_mac_address);
1664 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
1665 eth_zero_addr(cdev->ll2_mac_address);
1666
1667 rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
1668 cdev->ll2->handle);
1669 if (rc)
1670 DP_INFO(cdev, "Failed to terminate LL2 connection\n");
1671
1672 qed_ll2_kill_buffers(cdev);
1673
1674 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
1675 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
1676
1677 return rc;
1678fail:
1679 return -EINVAL;
1680}
1681
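/* qed_ll2_ops 'start_xmit' implementation: map the skb head (and, unless the
 * caller pre-mapped them, its fragments) and submit them through
 * qed_ll2_prepare_tx_packet() / qed_ll2_set_fragment_of_tx_packet().
 */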
1682static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
1683{
1684 const skb_frag_t *frag;
1685 int rc = -EINVAL, i;
1686 dma_addr_t mapping;
1687 u16 vlan = 0;
1688 u8 flags = 0;
1689
1690 if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
 1691 DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
1692 return -EINVAL;
1693 }
1694
1695 if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
1696 DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
1697 1 + skb_shinfo(skb)->nr_frags);
1698 return -EINVAL;
1699 }
1700
1701 mapping = dma_map_single(&cdev->pdev->dev, skb->data,
1702 skb->len, DMA_TO_DEVICE);
1703 if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
1704 DP_NOTICE(cdev, "SKB mapping failed\n");
1705 return -EINVAL;
1706 }
1707
1708 /* Request HW to calculate IP csum */
1709 if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
1710 ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1711 flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
1712
1713 if (skb_vlan_tag_present(skb)) {
1714 vlan = skb_vlan_tag_get(skb);
1715 flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
1716 }
1717
1718 rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
1719 cdev->ll2->handle,
1720 1 + skb_shinfo(skb)->nr_frags,
1721 vlan, flags, 0, 0 /* RoCE FLAVOR */,
1722 mapping, skb->len, skb, 1);
1723 if (rc)
1724 goto err;
1725
1726 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1727 frag = &skb_shinfo(skb)->frags[i];
1728 if (!cdev->ll2->frags_mapped) {
1729 mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
1730 skb_frag_size(frag),
1731 DMA_TO_DEVICE);
1732
1733 if (unlikely(dma_mapping_error(&cdev->pdev->dev,
1734 mapping))) {
1735 DP_NOTICE(cdev,
1736 "Unable to map frag - dropping packet\n");
1737 goto err;
1738 }
1739 } else {
1740 mapping = page_to_phys(skb_frag_page(frag)) |
1741 frag->page_offset;
1742 }
1743
1744 rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
1745 cdev->ll2->handle,
1746 mapping,
1747 skb_frag_size(frag));
1748
 1749 /* If this fails there's not much to do: a partial packet has already
 1750 * been posted and we can't free its memory; wait for the completion.
 1751 */
1752 if (rc)
1753 goto err2;
1754 }
1755
1756 return 0;
1757
1758err:
1759 dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
1760
1761err2:
1762 return rc;
1763}
1764
1765static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
1766{
1767 if (!cdev->ll2)
1768 return -EINVAL;
1769
1770 return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
1771 cdev->ll2->handle, stats);
1772}
1773
1774const struct qed_ll2_ops qed_ll2_ops_pass = {
1775 .start = &qed_ll2_start,
1776 .stop = &qed_ll2_stop,
1777 .start_xmit = &qed_ll2_start_xmit,
1778 .register_cb_ops = &qed_ll2_register_cb_ops,
1779 .get_stats = &qed_ll2_stats,
1780};
1781
1782int qed_ll2_alloc_if(struct qed_dev *cdev)
1783{
1784 cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
1785 return cdev->ll2 ? 0 : -ENOMEM;
1786}
1787
1788void qed_ll2_dealloc_if(struct qed_dev *cdev)
1789{
1790 kfree(cdev->ll2);
1791 cdev->ll2 = NULL;
1792}