/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include <rte_memzone.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include "vnic_dev.h"
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_nic.h"
#include "vnic_stats.h"

enum vnic_proxy_type {
	PROXY_NONE,
	PROXY_BY_BDF,
	PROXY_BY_INDEX,
};

struct vnic_res {
	void __iomem *vaddr;
	dma_addr_t bus_addr;
	unsigned int count;
};

struct vnic_intr_coal_timer_info {
	u32 mul;
	u32 div;
	u32 max_usec;
};

struct vnic_dev {
	void *priv;
	struct rte_pci_device *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	u32 notify_sz;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
	enum vnic_proxy_type proxy;
	u32 proxy_index;
	u64 args[VNIC_DEVCMD_NARGS];
	int in_reset;
	struct vnic_intr_coal_timer_info intr_coal_timer_info;
	void *(*alloc_consistent)(void *priv, size_t size,
		dma_addr_t *dma_handle, u8 *name);
	void (*free_consistent)(void *priv,
		size_t size, void *vaddr,
		dma_addr_t dma_handle);
};

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

void vnic_register_cbacks(struct vnic_dev *vdev,
	void *(*alloc_consistent)(void *priv, size_t size,
		dma_addr_t *dma_handle, u8 *name),
	void (*free_consistent)(void *priv,
		size_t size, void *vaddr,
		dma_addr_t dma_handle))
{
	vdev->alloc_consistent = alloc_consistent;
	vdev->free_consistent = free_consistent;
}

static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	mrh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
	    (ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
		    (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			pr_err("vNIC BAR0 res magic/version error " \
				"exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				VNIC_RES_MAGIC, VNIC_RES_VERSION,
				MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				ioread32(&rh->magic), ioread32(&rh->version));
			return -EINVAL;
		}
	}

	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
	else
		r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar[bar_num].len) {
				pr_err("vNIC BAR0 resource %d " \
					"out-of-bounds, offset 0x%x + " \
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
		    bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}

unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors. A
	 * count of 0 means the maximum 4096 descriptors. Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = VNIC_ALIGN(desc_count, count_align);

	ring->desc_size = VNIC_ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}
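
/*
 * Worked example of the sizing rules above (illustrative values only):
 * a request for 100 descriptors of 16 bytes each gives
 *
 *	desc_count     = 128		(100 rounded up to a multiple of 32)
 *	desc_size      = 16		(already a multiple of 16)
 *	size           = 128 * 16 = 2048
 *	size_unaligned = 2048 + 512 = 2560 (slack for 512-byte base alignment)
 */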

void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev,
	struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size,
	__attribute__((unused)) unsigned int socket_id,
	char *z_name)
{
	void *alloc_addr;
	dma_addr_t alloc_pa = 0;

	vnic_dev_desc_ring_size(ring, desc_count, desc_size);
	alloc_addr = vdev->alloc_consistent(vdev->priv,
					    ring->size_unaligned,
					    &alloc_pa, (u8 *)z_name);
	if (!alloc_addr) {
		pr_err("Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}
	ring->descs_unaligned = alloc_addr;
	if (!alloc_pa) {
		pr_err("Failed to map allocated ring (size=%d), aborting\n",
			(int)ring->size);
		vdev->free_consistent(vdev->priv,
				      ring->size_unaligned,
				      alloc_addr,
				      alloc_pa);
		return -ENOMEM;
	}
	ring->base_addr_unaligned = alloc_pa;

	ring->base_addr = VNIC_ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
	    (ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}
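
/*
 * Typical alloc/free pairing (sketch; the ring geometry and memzone name
 * below are hypothetical, and error handling is elided):
 *
 *	struct vnic_dev_ring ring;
 *
 *	if (!vnic_dev_alloc_desc_ring(vdev, &ring, 64, 16, 0,
 *				      "enic-example-ring")) {
 *		...post descriptors via ring.descs...
 *		vnic_dev_free_desc_ring(vdev, &ring);
 *	}
 */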

void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
	struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		vdev->free_consistent(vdev->priv,
				      ring->size_unaligned,
				      ring->descs_unaligned,
				      ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}

static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		wmb(); /* complete all writes initiated till now */
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {
		udelay(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				err = -(int)readq(&devcmd->args[0]);
				if (cmd != CMD_CAPABILITY)
					pr_err("Devcmd %d failed " \
						"with error code %d\n",
						_CMD_N(cmd), err);
				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb(); /* finish all reads initiated till now */
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	pr_err("Timed out devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
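
/*
 * Note on the wait argument: the loop above polls every 100 usec, so the
 * conventional wait = 1000 used throughout this file bounds a devcmd at
 * roughly 100 msec before -ETIMEDOUT is returned.
 */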

static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
	u64 *args, int nargs, int wait)
{
	u32 status;
	int err;

	/*
	 * Proxy command consumes 2 arguments. One for proxy index,
	 * the other is for command to be proxied
	 */
	if (nargs > VNIC_DEVCMD_NARGS - 2) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index;
	vdev->args[1] = cmd;
	memcpy(&vdev->args[2], args, nargs * sizeof(args[0]));

	err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
	if (err)
		return err;

	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
		return err;
	}

	memcpy(args, &vdev->args[1], nargs * sizeof(args[0]));

	return 0;
}
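
/*
 * Devcmd register layout used by the proxy path above (as implied by the
 * code; shown here for reference):
 *
 *	args[0]   proxy index or BDF, consumed by firmware
 *	args[1]   the devcmd to run on behalf of the proxied vNIC
 *	args[2..] arguments of the proxied devcmd
 *
 * On completion args[0] carries the proxy status and args[1..] the
 * results, which is why only VNIC_DEVCMD_NARGS - 2 caller arguments fit.
 */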

static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait)
{
	int err;

	if (nargs > VNIC_DEVCMD_NARGS) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));
	memcpy(vdev->args, args, nargs * sizeof(args[0]));

	err = _vnic_dev_cmd(vdev, cmd, wait);

	memcpy(args, vdev->args, nargs * sizeof(args[0]));

	return err;
}

int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	u64 args[2];
	int err;

	args[0] = *a0;
	args[1] = *a1;
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_BY_BDF:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_NONE:
	default:
		err = vnic_dev_cmd_no_proxy(vdev, cmd, args, 2, wait);
		break;
	}

	if (err == 0) {
		*a0 = args[0];
		*a1 = args[1];
	}

	return err;
}

int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *args, int nargs, int wait)
{
	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				args, nargs, wait);
	case PROXY_BY_BDF:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				args, nargs, wait);
	case PROXY_NONE:
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait);
	}
}

static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, u64 *args,
	int nargs)
{
	memset(args, 0, nargs * sizeof(*args));
	args[0] = CMD_ADD_ADV_FILTER;
	args[1] = FILTER_CAP_MODE_V1_FLAG;
	return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000);
}

int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
{
	u64 a0 = CMD_ADD_ADV_FILTER, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err)
		return 0;
	return (a1 >= (u32)FILTER_DPDK_1);
}

/* Determine the "best" filtering mode VIC is capable of. Returns one of 3
 * values or 0 on error:
 * FILTER_DPDK_1 - advanced filters available
 * FILTER_USNIC_IP_FLAG - advanced filters but with the restriction that
 *	the IP layer must be explicitly specified, i.e. cannot have a UDP
 *	filter that matches both IPv4 and IPv6.
 * FILTER_IPV4_5TUPLE - fallback if neither of the 2 above is available;
 *	all other filter types are not available.
 * The supported filter actions are returned in filter_actions.
 */
int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
				 u8 *filter_actions)
{
	u64 args[4];
	int err;
	u32 max_level = 0;

	err = vnic_dev_advanced_filters_cap(vdev, args, 4);

	/* determine supported filter actions */
	*filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */
	if (args[2] == FILTER_CAP_MODE_V1)
		*filter_actions = args[3];

	if (err || ((args[0] == 1) && (args[1] == 0))) {
		/* Adv filter command not supported, or adv filters available
		 * but not enabled. Try the normal filter capability command.
		 */
		args[0] = CMD_ADD_FILTER;
		args[1] = 0;
		err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000);
		if (err)
			return err;
		max_level = args[1];
		goto parse_max_level;
	} else if (args[2] == FILTER_CAP_MODE_V1) {
		/* parse filter capability mask in args[1] */
		if (args[1] & FILTER_DPDK_1_FLAG)
			*mode = FILTER_DPDK_1;
		else if (args[1] & FILTER_USNIC_IP_FLAG)
			*mode = FILTER_USNIC_IP;
		else if (args[1] & FILTER_IPV4_5TUPLE_FLAG)
			*mode = FILTER_IPV4_5TUPLE;
		return 0;
	}
	max_level = args[1];
parse_max_level:
	if (max_level >= (u32)FILTER_USNIC_IP)
		*mode = FILTER_USNIC_IP;
	else
		*mode = FILTER_IPV4_5TUPLE;
	return 0;
}
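
/*
 * Example caller (sketch; error handling elided):
 *
 *	u32 mode = 0;
 *	u8 actions = 0;
 *
 *	if (vnic_dev_capable_filter_mode(vdev, &mode, &actions) == 0 &&
 *	    mode == FILTER_DPDK_1)
 *		...advanced (DPDK flow-director style) filters available...
 */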

void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
				   bool *weak)
{
	u64 a0 = CMD_NIC_CFG, a1 = 0;
	int wait = 1000;
	int err;

	*cfg_chk = false;
	*weak = false;
	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err == 0 && a0 != 0 && a1 != 0) {
		*cfg_chk = true;
		*weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK);
	}
}

int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	u64 a0 = (u32)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1:
		*(u8 *)value = (u8)a0;
		break;
	case 2:
		*(u16 *)value = (u16)a0;
		break;
	case 4:
		*(u32 *)value = (u32)a0;
		break;
	case 8:
		*(u64 *)value = a0;
		break;
	default:
		BUG();
		break;
	}

	return err;
}
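
/*
 * Example (the offset is hypothetical): reading a 32-bit field at byte
 * offset 0x10 of the device-specific config space.
 *
 *	u32 val;
 *	int err = vnic_dev_spec(vdev, 0x10, sizeof(val), &val);
 */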

int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats)
		return -ENOMEM;

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	else
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}

int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		pr_err("Can't set packet filter\n");

	return err;
}

int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		pr_err("Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return err;
}

int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		pr_err("Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return err;
}

int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	u8 ig_vlan_rewrite_mode)
{
	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
				&a0, &a1, wait);
	else
		return 0;
}

void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state)
{
	vdev->in_reset = state;
}

static inline int vnic_dev_in_reset(struct vnic_dev *vdev)
{
	return vdev->in_reset;
}

int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, dma_addr_t notify_pa, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = notify_addr;
		vdev->notify_pa = notify_pa;
	}

	a0 = (u64)notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev))
		vdev->notify_sz = (r == 0) ? (u32)a1 : 0;

	return r;
}

int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr = NULL;
	dma_addr_t notify_pa = 0;
	char name[NAME_MAX];
	static u32 instance;

	if (vdev->notify || vdev->notify_pa) {
		return vnic_dev_notify_setcmd(vdev, vdev->notify,
					      vdev->notify_pa, intr);
	}
	if (!vnic_dev_in_reset(vdev)) {
		snprintf((char *)name, sizeof(name),
			 "vnic_notify-%u", instance++);
		notify_addr = vdev->alloc_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_notify),
			&notify_pa, (u8 *)name);
		if (!notify_addr)
			return -ENOMEM;
	}

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}

int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = 0; /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = NULL;
		vdev->notify_pa = 0;
		vdev->notify_sz = 0;
	}

	return err;
}

int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify && !vnic_dev_in_reset(vdev)) {
		vdev->free_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_notify),
			vdev->notify,
			vdev->notify_pa);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}

static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;
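
	/*
	 * Word 0 of the notify area holds a checksum of the remaining
	 * words; re-copy until the snapshot is self-consistent, i.e. until
	 * firmware was not updating the area mid-copy.
	 */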
	do {
		csum = 0;
		rte_memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}

int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT)) {
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	} else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}
	return r;
}

void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}
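
/*
 * With mul = 2 and div = 3 one hardware tick is 1.5 usec, so for example
 * vnic_dev_intr_coal_timer_usec_to_hw(vdev, 300) = 300 * 2 / 3 = 200
 * ticks, and the largest programmable value 0xffff corresponds to
 * 0xffff * 3 / 2 = 98302 usec.
 */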

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
		vdev->intr_coal_timer_info.div;
}

u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
		vdev->intr_coal_timer_info.mul;
}

u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}

int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
{
	char name[NAME_MAX];
	static u32 instance;

	snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
	vdev->stats = vdev->alloc_consistent(vdev->priv,
					     sizeof(struct vnic_stats),
					     &vdev->stats_pa, (u8 *)name);
	return vdev->stats == NULL ? -ENOMEM : 0;
}

void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->stats)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		rte_free(vdev);
	}
}

struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars)
{
	if (!vdev) {
		char name[NAME_MAX];

		snprintf((char *)name, sizeof(name), "%s-vnic",
			 pdev->device.name);
		vdev = (struct vnic_dev *)rte_zmalloc_socket(name,
					sizeof(struct vnic_dev),
					RTE_CACHE_LINE_SIZE,
					pdev->device.numa_node);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}

/*
 * vnic_dev_classifier: Add/Delete classifier entries
 * @vdev: vdev of the device
 * @cmd: CLSF_ADD for Add filter
 *	 CLSF_DEL for Delete filter
 * @entry: In case of ADD filter, the caller passes the RQ number in this
 *	   variable. This function stores the filter_id returned by the
 *	   firmware in the same variable before return.
 *
 *	   In case of DEL filter, the caller passes the filter_id returned
 *	   by a previous ADD.
 * @data: filter data
 * @action_v2: action data
 */
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
	struct filter_v2 *data, struct filter_action_v2 *action_v2)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	dma_addr_t tlv_pa;
	int ret = -EINVAL;
	struct filter_tlv *tlv, *tlv_va;
	u64 tlv_size;
	u32 filter_size, action_size;
	static unsigned int unique_id;
	char z_name[RTE_MEMZONE_NAMESIZE];
	enum vnic_devcmd_cmd dev_cmd;

	if (cmd == CLSF_ADD) {
		dev_cmd = (data->type >= FILTER_DPDK_1) ?
			  CMD_ADD_ADV_FILTER : CMD_ADD_FILTER;

		filter_size = vnic_filter_size(data);
		action_size = vnic_action_size(action_v2);

		tlv_size = filter_size + action_size +
			   2 * sizeof(struct filter_tlv);
		snprintf((char *)z_name, sizeof(z_name),
			 "vnic_clsf_%u", unique_id++);
		tlv_va = vdev->alloc_consistent(vdev->priv,
			tlv_size, &tlv_pa, (u8 *)z_name);
		if (!tlv_va)
			return -ENOMEM;
		tlv = tlv_va;
		a0 = tlv_pa;
		a1 = tlv_size;
		memset(tlv, 0, tlv_size);
		tlv->type = CLSF_TLV_FILTER;
		tlv->length = filter_size;
		memcpy(&tlv->val, (void *)data, filter_size);

		tlv = (struct filter_tlv *)((char *)tlv +
					    sizeof(struct filter_tlv) +
					    filter_size);

		tlv->type = CLSF_TLV_ACTION;
		tlv->length = action_size;
		memcpy(&tlv->val, (void *)action_v2, action_size);
		ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait);
		*entry = (u16)a0;
		vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
	} else if (cmd == CLSF_DEL) {
		a0 = *entry;
		ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
	}

	return ret;
}
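
/*
 * Example add/delete sequence (sketch; the filter and action contents are
 * hypothetical and error handling is elided):
 *
 *	struct filter_v2 fltr;
 *	struct filter_action_v2 act;
 *	u16 entry = rq_number;	(RQ that matching packets are steered to)
 *
 *	...fill in fltr and act...
 *	vnic_dev_classifier(vdev, CLSF_ADD, &entry, &fltr, &act);
 *	...entry now holds the firmware-assigned filter id...
 *	vnic_dev_classifier(vdev, CLSF_DEL, &entry, NULL, NULL);
 */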

int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
{
	u64 a0 = overlay;
	u64 a1 = config;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
}

int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
	u16 vxlan_udp_port_number)
{
	u64 a1 = vxlan_udp_port_number;
	u64 a0 = overlay;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
}

int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
{
	u64 a0 = VIC_FEATURE_VXLAN;
	u64 a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
	/* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */
	return ret == 0 &&
		(a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) ==
		(FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ);
}