]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | /* |
2 | * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved. | |
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | |
4 | * | |
5 | * Copyright (c) 2014, Cisco Systems, Inc. | |
6 | * All rights reserved. | |
7 | * | |
8 | * Redistribution and use in source and binary forms, with or without | |
9 | * modification, are permitted provided that the following conditions | |
10 | * are met: | |
11 | * | |
12 | * 1. Redistributions of source code must retain the above copyright | |
13 | * notice, this list of conditions and the following disclaimer. | |
14 | * | |
15 | * 2. Redistributions in binary form must reproduce the above copyright | |
16 | * notice, this list of conditions and the following disclaimer in | |
17 | * the documentation and/or other materials provided with the | |
18 | * distribution. | |
19 | * | |
20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | |
23 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | |
24 | * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | |
25 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | |
26 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |
27 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | |
28 | * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
29 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | |
30 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
31 | * POSSIBILITY OF SUCH DAMAGE. | |
32 | * | |
33 | */ | |
34 | ||
35 | #include <rte_memzone.h> | |
36 | #include <rte_memcpy.h> | |
37 | #include <rte_string_fns.h> | |
38 | ||
39 | #include "vnic_dev.h" | |
40 | #include "vnic_resource.h" | |
41 | #include "vnic_devcmd.h" | |
42 | #include "vnic_stats.h" | |
43 | ||
44 | ||
45 | enum vnic_proxy_type { | |
46 | PROXY_NONE, | |
47 | PROXY_BY_BDF, | |
48 | PROXY_BY_INDEX, | |
49 | }; | |
50 | ||
51 | struct vnic_res { | |
52 | void __iomem *vaddr; | |
53 | dma_addr_t bus_addr; | |
54 | unsigned int count; | |
55 | }; | |
56 | ||
57 | struct vnic_intr_coal_timer_info { | |
58 | u32 mul; | |
59 | u32 div; | |
60 | u32 max_usec; | |
61 | }; | |
62 | ||
63 | struct vnic_dev { | |
64 | void *priv; | |
65 | struct rte_pci_device *pdev; | |
66 | struct vnic_res res[RES_TYPE_MAX]; | |
67 | enum vnic_dev_intr_mode intr_mode; | |
68 | struct vnic_devcmd __iomem *devcmd; | |
69 | struct vnic_devcmd_notify *notify; | |
70 | struct vnic_devcmd_notify notify_copy; | |
71 | dma_addr_t notify_pa; | |
72 | u32 notify_sz; | |
73 | dma_addr_t linkstatus_pa; | |
74 | struct vnic_stats *stats; | |
75 | dma_addr_t stats_pa; | |
76 | struct vnic_devcmd_fw_info *fw_info; | |
77 | dma_addr_t fw_info_pa; | |
78 | enum vnic_proxy_type proxy; | |
79 | u32 proxy_index; | |
80 | u64 args[VNIC_DEVCMD_NARGS]; | |
81 | u16 split_hdr_size; | |
82 | int in_reset; | |
83 | struct vnic_intr_coal_timer_info intr_coal_timer_info; | |
84 | void *(*alloc_consistent)(void *priv, size_t size, | |
85 | dma_addr_t *dma_handle, u8 *name); | |
86 | void (*free_consistent)(void *priv, | |
87 | size_t size, void *vaddr, | |
88 | dma_addr_t dma_handle); | |
89 | }; | |
90 | ||
91 | #define VNIC_MAX_RES_HDR_SIZE \ | |
92 | (sizeof(struct vnic_resource_header) + \ | |
93 | sizeof(struct vnic_resource) * RES_TYPE_MAX) | |
94 | #define VNIC_RES_STRIDE 128 | |
95 | ||
96 | void *vnic_dev_priv(struct vnic_dev *vdev) | |
97 | { | |
98 | return vdev->priv; | |
99 | } | |
100 | ||
101 | void vnic_register_cbacks(struct vnic_dev *vdev, | |
102 | void *(*alloc_consistent)(void *priv, size_t size, | |
103 | dma_addr_t *dma_handle, u8 *name), | |
104 | void (*free_consistent)(void *priv, | |
105 | size_t size, void *vaddr, | |
106 | dma_addr_t dma_handle)) | |
107 | { | |
108 | vdev->alloc_consistent = alloc_consistent; | |
109 | vdev->free_consistent = free_consistent; | |
110 | } | |
111 | ||
/*
 * Walk the resource table exposed in BAR0 and record, per resource type,
 * its mapped virtual address, bus address and instance count in vdev->res[].
 *
 * Accepts either a normal vNIC header (VNIC_RES_MAGIC/VERSION) or a mgmt
 * vNIC header (MGMTVNIC_MAGIC/VERSION); the resource entries immediately
 * follow whichever header matched.
 *
 * Returns 0 on success, -EINVAL on a missing/invalid header or an
 * out-of-bounds strided resource.
 */
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	/* The header plus a full table of RES_TYPE_MAX entries must fit. */
	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	/* Both header layouts start at the same BAR0 offset. */
	rh = bar->vaddr;
	mrh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
	    (ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
		    (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			pr_err("vNIC BAR0 res magic/version error " \
				"exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				VNIC_RES_MAGIC, VNIC_RES_VERSION,
				MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				ioread32(&rh->magic), ioread32(&rh->version));
			return -EINVAL;
		}
	}

	/* Resource entries begin right after the header that matched. */
	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
	else
		r = (struct vnic_resource __iomem *)(rh + 1);


	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		/* Skip entries that point at a BAR we did not map. */
		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			/* NOTE(review): count * stride and len + bar_offset
			 * could wrap in u32 for hostile hardware values;
			 * assumed benign for this device — confirm.
			 */
			if (len + bar_offset > bar[bar_num].len) {
				pr_err("vNIC BAR0 resource %d " \
					"out-of-bounds, offset 0x%x + " \
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			/* Unknown resource types are ignored, not errors. */
			continue;
		}

		/* Record where this resource type lives; a later entry of
		 * the same type overwrites an earlier one.
		 */
		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
		    bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}
202 | ||
203 | unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, | |
204 | enum vnic_res_type type) | |
205 | { | |
206 | return vdev->res[type].count; | |
207 | } | |
208 | ||
209 | void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, | |
210 | unsigned int index) | |
211 | { | |
212 | if (!vdev->res[type].vaddr) | |
213 | return NULL; | |
214 | ||
215 | switch (type) { | |
216 | case RES_TYPE_WQ: | |
217 | case RES_TYPE_RQ: | |
218 | case RES_TYPE_CQ: | |
219 | case RES_TYPE_INTR_CTRL: | |
220 | return (char __iomem *)vdev->res[type].vaddr + | |
221 | index * VNIC_RES_STRIDE; | |
222 | default: | |
223 | return (char __iomem *)vdev->res[type].vaddr; | |
224 | } | |
225 | } | |
226 | ||
227 | unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, | |
228 | unsigned int desc_count, unsigned int desc_size) | |
229 | { | |
230 | /* The base address of the desc rings must be 512 byte aligned. | |
231 | * Descriptor count is aligned to groups of 32 descriptors. A | |
232 | * count of 0 means the maximum 4096 descriptors. Descriptor | |
233 | * size is aligned to 16 bytes. | |
234 | */ | |
235 | ||
236 | unsigned int count_align = 32; | |
237 | unsigned int desc_align = 16; | |
238 | ||
239 | ring->base_align = 512; | |
240 | ||
241 | if (desc_count == 0) | |
242 | desc_count = 4096; | |
243 | ||
244 | ring->desc_count = VNIC_ALIGN(desc_count, count_align); | |
245 | ||
246 | ring->desc_size = VNIC_ALIGN(desc_size, desc_align); | |
247 | ||
248 | ring->size = ring->desc_count * ring->desc_size; | |
249 | ring->size_unaligned = ring->size + ring->base_align; | |
250 | ||
251 | return ring->size_unaligned; | |
252 | } | |
253 | ||
254 | void vnic_set_hdr_split_size(struct vnic_dev *vdev, u16 size) | |
255 | { | |
256 | vdev->split_hdr_size = size; | |
257 | } | |
258 | ||
259 | u16 vnic_get_hdr_split_size(struct vnic_dev *vdev) | |
260 | { | |
261 | return vdev->split_hdr_size; | |
262 | } | |
263 | ||
264 | void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) | |
265 | { | |
266 | memset(ring->descs, 0, ring->size); | |
267 | } | |
268 | ||
269 | int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, | |
270 | struct vnic_dev_ring *ring, | |
271 | unsigned int desc_count, unsigned int desc_size, | |
272 | __attribute__((unused)) unsigned int socket_id, | |
273 | char *z_name) | |
274 | { | |
275 | void *alloc_addr = NULL; | |
276 | dma_addr_t alloc_pa = 0; | |
277 | ||
278 | vnic_dev_desc_ring_size(ring, desc_count, desc_size); | |
279 | alloc_addr = vdev->alloc_consistent(vdev->priv, | |
280 | ring->size_unaligned, | |
281 | &alloc_pa, (u8 *)z_name); | |
282 | if (!alloc_addr) { | |
283 | pr_err("Failed to allocate ring (size=%d), aborting\n", | |
284 | (int)ring->size); | |
285 | return -ENOMEM; | |
286 | } | |
287 | ring->descs_unaligned = alloc_addr; | |
288 | if (!alloc_pa) { | |
289 | pr_err("Failed to map allocated ring (size=%d), aborting\n", | |
290 | (int)ring->size); | |
291 | vdev->free_consistent(vdev->priv, | |
292 | ring->size_unaligned, | |
293 | alloc_addr, | |
294 | alloc_pa); | |
295 | return -ENOMEM; | |
296 | } | |
297 | ring->base_addr_unaligned = alloc_pa; | |
298 | ||
299 | ring->base_addr = VNIC_ALIGN(ring->base_addr_unaligned, | |
300 | ring->base_align); | |
301 | ring->descs = (u8 *)ring->descs_unaligned + | |
302 | (ring->base_addr - ring->base_addr_unaligned); | |
303 | ||
304 | vnic_dev_clear_desc_ring(ring); | |
305 | ||
306 | ring->desc_avail = ring->desc_count - 1; | |
307 | ||
308 | return 0; | |
309 | } | |
310 | ||
311 | void vnic_dev_free_desc_ring(__attribute__((unused)) struct vnic_dev *vdev, | |
312 | struct vnic_dev_ring *ring) | |
313 | { | |
314 | if (ring->descs) { | |
315 | vdev->free_consistent(vdev->priv, | |
316 | ring->size_unaligned, | |
317 | ring->descs_unaligned, | |
318 | ring->base_addr_unaligned); | |
319 | ring->descs = NULL; | |
320 | } | |
321 | } | |
322 | ||
/*
 * Issue one firmware devcmd through the memory-mapped devcmd region and,
 * unless the command is flagged no-wait, poll for completion.
 *
 * Arguments are exchanged through vdev->args[]: written to the device for
 * _CMD_DIR_WRITE commands, read back for _CMD_DIR_READ commands.
 * 'wait' is the poll budget in 100 us steps.
 *
 * Returns 0 on success, -ENODEV when the PCIe device has vanished (all-ones
 * status reads), -EBUSY if a previous command is still in flight, the
 * device's negated error code on STAT_ERROR, or -ETIMEDOUT.
 */
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {

		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	/* Post the arguments before the command register: writing the
	 * command is what triggers execution.
	 */
	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		wmb(); /* complete all writes initiated till now */
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				/* args[0] holds the positive error code. */
				err = -(int)readq(&devcmd->args[0]);
				/* CMD_CAPABILITY probes are expected to
				 * fail on older firmware; stay quiet.
				 */
				if (cmd != CMD_CAPABILITY)
					pr_err("Devcmd %d failed " \
						"with error code %d\n",
						_CMD_N(cmd), err);
				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();/* finish all reads initiated till now */
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
387 | ||
388 | static int vnic_dev_cmd_proxy(struct vnic_dev *vdev, | |
389 | enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd, | |
390 | u64 *a0, u64 *a1, int wait) | |
391 | { | |
392 | u32 status; | |
393 | int err; | |
394 | ||
395 | memset(vdev->args, 0, sizeof(vdev->args)); | |
396 | ||
397 | vdev->args[0] = vdev->proxy_index; | |
398 | vdev->args[1] = cmd; | |
399 | vdev->args[2] = *a0; | |
400 | vdev->args[3] = *a1; | |
401 | ||
402 | err = _vnic_dev_cmd(vdev, proxy_cmd, wait); | |
403 | if (err) | |
404 | return err; | |
405 | ||
406 | status = (u32)vdev->args[0]; | |
407 | if (status & STAT_ERROR) { | |
408 | err = (int)vdev->args[1]; | |
409 | if (err != ERR_ECMDUNKNOWN || | |
410 | cmd != CMD_CAPABILITY) | |
411 | pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd)); | |
412 | return err; | |
413 | } | |
414 | ||
415 | *a0 = vdev->args[1]; | |
416 | *a1 = vdev->args[2]; | |
417 | ||
418 | return 0; | |
419 | } | |
420 | ||
421 | static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev, | |
422 | enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait) | |
423 | { | |
424 | int err; | |
425 | ||
426 | vdev->args[0] = *a0; | |
427 | vdev->args[1] = *a1; | |
428 | ||
429 | err = _vnic_dev_cmd(vdev, cmd, wait); | |
430 | ||
431 | *a0 = vdev->args[0]; | |
432 | *a1 = vdev->args[1]; | |
433 | ||
434 | return err; | |
435 | } | |
436 | ||
437 | void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index) | |
438 | { | |
439 | vdev->proxy = PROXY_BY_INDEX; | |
440 | vdev->proxy_index = index; | |
441 | } | |
442 | ||
443 | void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf) | |
444 | { | |
445 | vdev->proxy = PROXY_BY_BDF; | |
446 | vdev->proxy_index = bdf; | |
447 | } | |
448 | ||
449 | void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev) | |
450 | { | |
451 | vdev->proxy = PROXY_NONE; | |
452 | vdev->proxy_index = 0; | |
453 | } | |
454 | ||
455 | int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, | |
456 | u64 *a0, u64 *a1, int wait) | |
457 | { | |
458 | memset(vdev->args, 0, sizeof(vdev->args)); | |
459 | ||
460 | switch (vdev->proxy) { | |
461 | case PROXY_BY_INDEX: | |
462 | return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd, | |
463 | a0, a1, wait); | |
464 | case PROXY_BY_BDF: | |
465 | return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd, | |
466 | a0, a1, wait); | |
467 | case PROXY_NONE: | |
468 | default: | |
469 | return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait); | |
470 | } | |
471 | } | |
472 | ||
473 | int vnic_dev_capable_adv_filters(struct vnic_dev *vdev) | |
474 | { | |
475 | u64 a0 = (u32)CMD_ADD_ADV_FILTER, a1 = 0; | |
476 | int wait = 1000; | |
477 | int err; | |
478 | ||
479 | err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait); | |
480 | if (err) | |
481 | return 0; | |
482 | return (a1 >= (u32)FILTER_DPDK_1); | |
483 | } | |
484 | ||
485 | static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd) | |
486 | { | |
487 | u64 a0 = (u32)cmd, a1 = 0; | |
488 | int wait = 1000; | |
489 | int err; | |
490 | ||
491 | err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait); | |
492 | ||
493 | return !(err || a0); | |
494 | } | |
495 | ||
496 | int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size, | |
497 | void *value) | |
498 | { | |
499 | u64 a0, a1; | |
500 | int wait = 1000; | |
501 | int err; | |
502 | ||
503 | a0 = offset; | |
504 | a1 = size; | |
505 | ||
506 | err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait); | |
507 | ||
508 | switch (size) { | |
509 | case 1: | |
510 | *(u8 *)value = (u8)a0; | |
511 | break; | |
512 | case 2: | |
513 | *(u16 *)value = (u16)a0; | |
514 | break; | |
515 | case 4: | |
516 | *(u32 *)value = (u32)a0; | |
517 | break; | |
518 | case 8: | |
519 | *(u64 *)value = a0; | |
520 | break; | |
521 | default: | |
522 | BUG(); | |
523 | break; | |
524 | } | |
525 | ||
526 | return err; | |
527 | } | |
528 | ||
529 | int vnic_dev_stats_clear(struct vnic_dev *vdev) | |
530 | { | |
531 | u64 a0 = 0, a1 = 0; | |
532 | int wait = 1000; | |
533 | ||
534 | return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait); | |
535 | } | |
536 | ||
537 | int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats) | |
538 | { | |
539 | u64 a0, a1; | |
540 | int wait = 1000; | |
541 | static u32 instance; | |
542 | char name[NAME_MAX]; | |
543 | ||
544 | if (!vdev->stats) { | |
545 | snprintf((char *)name, sizeof(name), | |
546 | "vnic_stats-%d", instance++); | |
547 | vdev->stats = vdev->alloc_consistent(vdev->priv, | |
548 | sizeof(struct vnic_stats), &vdev->stats_pa, (u8 *)name); | |
549 | if (!vdev->stats) | |
550 | return -ENOMEM; | |
551 | } | |
552 | ||
553 | *stats = vdev->stats; | |
554 | a0 = vdev->stats_pa; | |
555 | a1 = sizeof(struct vnic_stats); | |
556 | ||
557 | return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait); | |
558 | } | |
559 | ||
560 | int vnic_dev_close(struct vnic_dev *vdev) | |
561 | { | |
562 | u64 a0 = 0, a1 = 0; | |
563 | int wait = 1000; | |
564 | ||
565 | return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait); | |
566 | } | |
567 | ||
568 | /** Deprecated. @see vnic_dev_enable_wait */ | |
569 | int vnic_dev_enable(struct vnic_dev *vdev) | |
570 | { | |
571 | u64 a0 = 0, a1 = 0; | |
572 | int wait = 1000; | |
573 | ||
574 | return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait); | |
575 | } | |
576 | ||
577 | int vnic_dev_enable_wait(struct vnic_dev *vdev) | |
578 | { | |
579 | u64 a0 = 0, a1 = 0; | |
580 | int wait = 1000; | |
581 | ||
582 | if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT)) | |
583 | return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait); | |
584 | else | |
585 | return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait); | |
586 | } | |
587 | ||
588 | int vnic_dev_disable(struct vnic_dev *vdev) | |
589 | { | |
590 | u64 a0 = 0, a1 = 0; | |
591 | int wait = 1000; | |
592 | ||
593 | return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait); | |
594 | } | |
595 | ||
596 | int vnic_dev_open(struct vnic_dev *vdev, int arg) | |
597 | { | |
598 | u64 a0 = (u32)arg, a1 = 0; | |
599 | int wait = 1000; | |
600 | ||
601 | return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait); | |
602 | } | |
603 | ||
604 | int vnic_dev_open_done(struct vnic_dev *vdev, int *done) | |
605 | { | |
606 | u64 a0 = 0, a1 = 0; | |
607 | int wait = 1000; | |
608 | int err; | |
609 | ||
610 | *done = 0; | |
611 | ||
612 | err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait); | |
613 | if (err) | |
614 | return err; | |
615 | ||
616 | *done = (a0 == 0); | |
617 | ||
618 | return 0; | |
619 | } | |
620 | ||
621 | int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg) | |
622 | { | |
623 | u64 a0 = (u32)arg, a1 = 0; | |
624 | int wait = 1000; | |
625 | ||
626 | return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait); | |
627 | } | |
628 | ||
629 | int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done) | |
630 | { | |
631 | u64 a0 = 0, a1 = 0; | |
632 | int wait = 1000; | |
633 | int err; | |
634 | ||
635 | *done = 0; | |
636 | ||
637 | err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait); | |
638 | if (err) | |
639 | return err; | |
640 | ||
641 | *done = (a0 == 0); | |
642 | ||
643 | return 0; | |
644 | } | |
645 | ||
646 | int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) | |
647 | { | |
648 | u64 a0, a1 = 0; | |
649 | int wait = 1000; | |
650 | int err, i; | |
651 | ||
652 | for (i = 0; i < ETH_ALEN; i++) | |
653 | mac_addr[i] = 0; | |
654 | ||
655 | err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait); | |
656 | if (err) | |
657 | return err; | |
658 | ||
659 | for (i = 0; i < ETH_ALEN; i++) | |
660 | mac_addr[i] = ((u8 *)&a0)[i]; | |
661 | ||
662 | return 0; | |
663 | } | |
664 | ||
665 | int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, | |
666 | int broadcast, int promisc, int allmulti) | |
667 | { | |
668 | u64 a0, a1 = 0; | |
669 | int wait = 1000; | |
670 | int err; | |
671 | ||
672 | a0 = (directed ? CMD_PFILTER_DIRECTED : 0) | | |
673 | (multicast ? CMD_PFILTER_MULTICAST : 0) | | |
674 | (broadcast ? CMD_PFILTER_BROADCAST : 0) | | |
675 | (promisc ? CMD_PFILTER_PROMISCUOUS : 0) | | |
676 | (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0); | |
677 | ||
678 | err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait); | |
679 | if (err) | |
680 | pr_err("Can't set packet filter\n"); | |
681 | ||
682 | return err; | |
683 | } | |
684 | ||
685 | int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) | |
686 | { | |
687 | u64 a0 = 0, a1 = 0; | |
688 | int wait = 1000; | |
689 | int err; | |
690 | int i; | |
691 | ||
692 | for (i = 0; i < ETH_ALEN; i++) | |
693 | ((u8 *)&a0)[i] = addr[i]; | |
694 | ||
695 | err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); | |
696 | if (err) | |
697 | pr_err("Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", | |
698 | addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], | |
699 | err); | |
700 | ||
701 | return err; | |
702 | } | |
703 | ||
704 | int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) | |
705 | { | |
706 | u64 a0 = 0, a1 = 0; | |
707 | int wait = 1000; | |
708 | int err; | |
709 | int i; | |
710 | ||
711 | for (i = 0; i < ETH_ALEN; i++) | |
712 | ((u8 *)&a0)[i] = addr[i]; | |
713 | ||
714 | err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); | |
715 | if (err) | |
716 | pr_err("Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", | |
717 | addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], | |
718 | err); | |
719 | ||
720 | return err; | |
721 | } | |
722 | ||
723 | int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev, | |
724 | u8 ig_vlan_rewrite_mode) | |
725 | { | |
726 | u64 a0 = ig_vlan_rewrite_mode, a1 = 0; | |
727 | int wait = 1000; | |
728 | ||
729 | if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE)) | |
730 | return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE, | |
731 | &a0, &a1, wait); | |
732 | else | |
733 | return 0; | |
734 | } | |
735 | ||
736 | int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr) | |
737 | { | |
738 | u64 a0 = intr, a1 = 0; | |
739 | int wait = 1000; | |
740 | int err; | |
741 | ||
742 | err = vnic_dev_cmd(vdev, CMD_IAR, &a0, &a1, wait); | |
743 | if (err) | |
744 | pr_err("Failed to raise INTR[%d], err %d\n", intr, err); | |
745 | ||
746 | return err; | |
747 | } | |
748 | ||
749 | void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state) | |
750 | { | |
751 | vdev->in_reset = state; | |
752 | } | |
753 | ||
754 | static inline int vnic_dev_in_reset(struct vnic_dev *vdev) | |
755 | { | |
756 | return vdev->in_reset; | |
757 | } | |
758 | ||
759 | int vnic_dev_notify_setcmd(struct vnic_dev *vdev, | |
760 | void *notify_addr, dma_addr_t notify_pa, u16 intr) | |
761 | { | |
762 | u64 a0, a1; | |
763 | int wait = 1000; | |
764 | int r; | |
765 | ||
766 | memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify)); | |
767 | if (!vnic_dev_in_reset(vdev)) { | |
768 | vdev->notify = notify_addr; | |
769 | vdev->notify_pa = notify_pa; | |
770 | } | |
771 | ||
772 | a0 = (u64)notify_pa; | |
773 | a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL; | |
774 | a1 += sizeof(struct vnic_devcmd_notify); | |
775 | ||
776 | r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); | |
777 | if (!vnic_dev_in_reset(vdev)) | |
778 | vdev->notify_sz = (r == 0) ? (u32)a1 : 0; | |
779 | ||
780 | return r; | |
781 | } | |
782 | ||
783 | int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) | |
784 | { | |
785 | void *notify_addr = NULL; | |
786 | dma_addr_t notify_pa = 0; | |
787 | char name[NAME_MAX]; | |
788 | static u32 instance; | |
789 | ||
790 | if (vdev->notify || vdev->notify_pa) { | |
791 | return vnic_dev_notify_setcmd(vdev, vdev->notify, | |
792 | vdev->notify_pa, intr); | |
793 | } | |
794 | if (!vnic_dev_in_reset(vdev)) { | |
795 | snprintf((char *)name, sizeof(name), | |
796 | "vnic_notify-%d", instance++); | |
797 | notify_addr = vdev->alloc_consistent(vdev->priv, | |
798 | sizeof(struct vnic_devcmd_notify), | |
799 | ¬ify_pa, (u8 *)name); | |
800 | if (!notify_addr) | |
801 | return -ENOMEM; | |
802 | } | |
803 | ||
804 | return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr); | |
805 | } | |
806 | ||
807 | int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev) | |
808 | { | |
809 | u64 a0, a1; | |
810 | int wait = 1000; | |
811 | int err; | |
812 | ||
813 | a0 = 0; /* paddr = 0 to unset notify buffer */ | |
814 | a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */ | |
815 | a1 += sizeof(struct vnic_devcmd_notify); | |
816 | ||
817 | err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); | |
818 | if (!vnic_dev_in_reset(vdev)) { | |
819 | vdev->notify = NULL; | |
820 | vdev->notify_pa = 0; | |
821 | vdev->notify_sz = 0; | |
822 | } | |
823 | ||
824 | return err; | |
825 | } | |
826 | ||
827 | int vnic_dev_notify_unset(struct vnic_dev *vdev) | |
828 | { | |
829 | if (vdev->notify && !vnic_dev_in_reset(vdev)) { | |
830 | vdev->free_consistent(vdev->priv, | |
831 | sizeof(struct vnic_devcmd_notify), | |
832 | vdev->notify, | |
833 | vdev->notify_pa); | |
834 | } | |
835 | ||
836 | return vnic_dev_notify_unsetcmd(vdev); | |
837 | } | |
838 | ||
/*
 * Snapshot the firmware-written notify area into vdev->notify_copy and
 * verify its consistency: word 0 is a checksum that must equal the sum of
 * all remaining 32-bit words. Retries the copy until a consistent snapshot
 * is captured (the firmware may be updating the area concurrently).
 *
 * Returns 1 when a consistent copy is available, 0 when notifications are
 * not set up.
 *
 * NOTE(review): assumes the firmware keeps the checksum coherent so the
 * loop terminates; no retry bound is applied — confirm against the device
 * notify protocol.
 */
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		rte_memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (u32 *)&vdev->notify_copy;
		/* Sum everything after the leading checksum word. */
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
859 | ||
860 | int vnic_dev_init(struct vnic_dev *vdev, int arg) | |
861 | { | |
862 | u64 a0 = (u32)arg, a1 = 0; | |
863 | int wait = 1000; | |
864 | int r = 0; | |
865 | ||
866 | if (vnic_dev_capable(vdev, CMD_INIT)) | |
867 | r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); | |
868 | else { | |
869 | vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait); | |
870 | if (a0 & CMD_INITF_DEFAULT_MAC) { | |
871 | /* Emulate these for old CMD_INIT_v1 which | |
872 | * didn't pass a0 so no CMD_INITF_*. | |
873 | */ | |
874 | vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait); | |
875 | vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); | |
876 | } | |
877 | } | |
878 | return r; | |
879 | } | |
880 | ||
881 | int vnic_dev_deinit(struct vnic_dev *vdev) | |
882 | { | |
883 | u64 a0 = 0, a1 = 0; | |
884 | int wait = 1000; | |
885 | ||
886 | return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait); | |
887 | } | |
888 | ||
889 | void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev) | |
890 | { | |
891 | /* Default: hardware intr coal timer is in units of 1.5 usecs */ | |
892 | vdev->intr_coal_timer_info.mul = 2; | |
893 | vdev->intr_coal_timer_info.div = 3; | |
894 | vdev->intr_coal_timer_info.max_usec = | |
895 | vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff); | |
896 | } | |
897 | ||
898 | int vnic_dev_link_status(struct vnic_dev *vdev) | |
899 | { | |
900 | if (!vnic_dev_notify_ready(vdev)) | |
901 | return 0; | |
902 | ||
903 | return vdev->notify_copy.link_state; | |
904 | } | |
905 | ||
906 | u32 vnic_dev_port_speed(struct vnic_dev *vdev) | |
907 | { | |
908 | if (!vnic_dev_notify_ready(vdev)) | |
909 | return 0; | |
910 | ||
911 | return vdev->notify_copy.port_speed; | |
912 | } | |
913 | ||
914 | void vnic_dev_set_intr_mode(struct vnic_dev *vdev, | |
915 | enum vnic_dev_intr_mode intr_mode) | |
916 | { | |
917 | vdev->intr_mode = intr_mode; | |
918 | } | |
919 | ||
920 | enum vnic_dev_intr_mode vnic_dev_get_intr_mode( | |
921 | struct vnic_dev *vdev) | |
922 | { | |
923 | return vdev->intr_mode; | |
924 | } | |
925 | ||
926 | u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec) | |
927 | { | |
928 | return (usec * vdev->intr_coal_timer_info.mul) / | |
929 | vdev->intr_coal_timer_info.div; | |
930 | } | |
931 | ||
932 | u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles) | |
933 | { | |
934 | return (hw_cycles * vdev->intr_coal_timer_info.div) / | |
935 | vdev->intr_coal_timer_info.mul; | |
936 | } | |
937 | ||
938 | u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev) | |
939 | { | |
940 | return vdev->intr_coal_timer_info.max_usec; | |
941 | } | |
942 | ||
943 | void vnic_dev_unregister(struct vnic_dev *vdev) | |
944 | { | |
945 | if (vdev) { | |
946 | if (vdev->notify) | |
947 | vdev->free_consistent(vdev->priv, | |
948 | sizeof(struct vnic_devcmd_notify), | |
949 | vdev->notify, | |
950 | vdev->notify_pa); | |
951 | if (vdev->stats) | |
952 | vdev->free_consistent(vdev->priv, | |
953 | sizeof(struct vnic_stats), | |
954 | vdev->stats, vdev->stats_pa); | |
955 | if (vdev->fw_info) | |
956 | vdev->free_consistent(vdev->priv, | |
957 | sizeof(struct vnic_devcmd_fw_info), | |
958 | vdev->fw_info, vdev->fw_info_pa); | |
959 | kfree(vdev); | |
960 | } | |
961 | } | |
962 | ||
963 | struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, | |
964 | void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar, | |
965 | unsigned int num_bars) | |
966 | { | |
967 | if (!vdev) { | |
968 | vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC); | |
969 | if (!vdev) | |
970 | return NULL; | |
971 | } | |
972 | ||
973 | vdev->priv = priv; | |
974 | vdev->pdev = pdev; | |
975 | ||
976 | if (vnic_dev_discover_res(vdev, bar, num_bars)) | |
977 | goto err_out; | |
978 | ||
979 | vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); | |
980 | if (!vdev->devcmd) | |
981 | goto err_out; | |
982 | ||
983 | return vdev; | |
984 | ||
985 | err_out: | |
986 | vnic_dev_unregister(vdev); | |
987 | return NULL; | |
988 | } | |
989 | ||
990 | struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev) | |
991 | { | |
992 | return vdev->pdev; | |
993 | } | |
994 | ||
995 | int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) | |
996 | { | |
997 | u64 a0, a1 = 0; | |
998 | int wait = 1000; | |
999 | int i; | |
1000 | ||
1001 | for (i = 0; i < ETH_ALEN; i++) | |
1002 | ((u8 *)&a0)[i] = mac_addr[i]; | |
1003 | ||
1004 | return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait); | |
1005 | } | |
1006 | ||
1007 | /* | |
1008 | * vnic_dev_classifier: Add/Delete classifier entries | |
1009 | * @vdev: vdev of the device | |
1010 | * @cmd: CLSF_ADD for Add filter | |
1011 | * CLSF_DEL for Delete filter | |
1012 | * @entry: In case of ADD filter, the caller passes the RQ number in this | |
1013 | * variable. | |
1014 | * This function stores the filter_id returned by the | |
1015 | * firmware in the same variable before return; | |
1016 | * | |
1017 | * In case of DEL filter, the caller passes the RQ number. Return | |
1018 | * value is irrelevant. | |
1019 | * @data: filter data | |
1020 | */ | |
1021 | int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, | |
1022 | struct filter_v2 *data) | |
1023 | { | |
1024 | u64 a0, a1; | |
1025 | int wait = 1000; | |
1026 | dma_addr_t tlv_pa; | |
1027 | int ret = -EINVAL; | |
1028 | struct filter_tlv *tlv, *tlv_va; | |
1029 | struct filter_action *action; | |
1030 | u64 tlv_size; | |
1031 | u32 filter_size; | |
1032 | static unsigned int unique_id; | |
1033 | char z_name[RTE_MEMZONE_NAMESIZE]; | |
1034 | enum vnic_devcmd_cmd dev_cmd; | |
1035 | ||
1036 | ||
1037 | if (cmd == CLSF_ADD) { | |
1038 | if (data->type == FILTER_DPDK_1) | |
1039 | dev_cmd = CMD_ADD_ADV_FILTER; | |
1040 | else | |
1041 | dev_cmd = CMD_ADD_FILTER; | |
1042 | ||
1043 | filter_size = vnic_filter_size(data); | |
1044 | tlv_size = filter_size + | |
1045 | sizeof(struct filter_action) + | |
1046 | 2*sizeof(struct filter_tlv); | |
1047 | snprintf((char *)z_name, sizeof(z_name), | |
1048 | "vnic_clsf_%d", unique_id++); | |
1049 | tlv_va = vdev->alloc_consistent(vdev->priv, | |
1050 | tlv_size, &tlv_pa, (u8 *)z_name); | |
1051 | if (!tlv_va) | |
1052 | return -ENOMEM; | |
1053 | tlv = tlv_va; | |
1054 | a0 = tlv_pa; | |
1055 | a1 = tlv_size; | |
1056 | memset(tlv, 0, tlv_size); | |
1057 | tlv->type = CLSF_TLV_FILTER; | |
1058 | tlv->length = filter_size; | |
1059 | memcpy(&tlv->val, (void *)data, filter_size); | |
1060 | ||
1061 | tlv = (struct filter_tlv *)((char *)tlv + | |
1062 | sizeof(struct filter_tlv) + | |
1063 | filter_size); | |
1064 | ||
1065 | tlv->type = CLSF_TLV_ACTION; | |
1066 | tlv->length = sizeof(struct filter_action); | |
1067 | action = (struct filter_action *)&tlv->val; | |
1068 | action->type = FILTER_ACTION_RQ_STEERING; | |
1069 | action->u.rq_idx = *entry; | |
1070 | ||
1071 | ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait); | |
1072 | *entry = (u16)a0; | |
1073 | vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa); | |
1074 | } else if (cmd == CLSF_DEL) { | |
1075 | a0 = *entry; | |
1076 | ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait); | |
1077 | } | |
1078 | ||
1079 | return ret; | |
1080 | } |