Commit | Line | Data |
---|---|---|
1 | /* |
2 | * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver | |
11ce2ba3 | 3 | * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com> |
4 | * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License as published by | |
8 | * the Free Software Foundation; either version 2 of the License, or | |
9 | * (at your option) any later version. | |
10 | */ | |
11 | ||
12 | #include <linux/kernel.h> | |
13 | #include <linux/module.h> | |
14 | #include <linux/pci.h> | |
15 | #include <linux/interrupt.h> | |
16 | #include <linux/sched.h> | |
17 | #include <linux/wait.h> | |
18 | #include <linux/spinlock.h> | |
19 | #include <linux/sort.h> |
20 | #include <linux/random.h> | |
21 | #include <linux/netdevice.h> | |
22 | #include <linux/skbuff.h> |
23 | #include <linux/socket.h> | |
24 | #include <linux/etherdevice.h> | |
25 | #include <linux/ethtool.h> | |
26 | #include <linux/if_ether.h> | |
27 | #include <linux/if_vlan.h> | |
6c707945 | 28 | #include <linux/if_bridge.h> |
9f6bbf7c | 29 | #include <linux/bitops.h> |
db19170b | 30 | #include <linux/ctype.h> |
c1bb279c | 31 | #include <linux/workqueue.h> |
32 | #include <net/switchdev.h> |
33 | #include <net/rtnetlink.h> | |
34 | #include <net/netevent.h> |
35 | #include <net/arp.h> | |
2f8e2c87 | 36 | #include <linux/io-64-nonatomic-lo-hi.h> |
37 | #include <generated/utsrelease.h> |
38 | ||
0fe685f6 | 39 | #include "rocker_hw.h" |
40 | #include "rocker.h" |
41 | #include "rocker_tlv.h" | |
42 | |
43 | static const char rocker_driver_name[] = "rocker"; | |
44 | ||
45 | static const struct pci_device_id rocker_pci_id_table[] = { | |
46 | {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0}, | |
47 | {0, } | |
48 | }; | |
49 | ||
50 | struct rocker_wait { |
51 | wait_queue_head_t wait; | |
52 | bool done; | |
179f9a25 | 53 | bool nowait; |
54 | }; |
55 | ||
56 | static void rocker_wait_reset(struct rocker_wait *wait) | |
57 | { | |
58 | wait->done = false; | |
179f9a25 | 59 | wait->nowait = false; |
60 | } |
61 | ||
62 | static void rocker_wait_init(struct rocker_wait *wait) | |
63 | { | |
64 | init_waitqueue_head(&wait->wait); | |
65 | rocker_wait_reset(wait); | |
66 | } | |
67 | ||
ca0a5f2a | 68 | static struct rocker_wait *rocker_wait_create(void) |
69 | { |
70 | struct rocker_wait *wait; | |
71 | ||
ca0a5f2a | 72 | wait = kzalloc(sizeof(*wait), GFP_KERNEL); |
73 | if (!wait) |
74 | return NULL; | |
75 | return wait; |
76 | } | |
77 | ||
ca0a5f2a | 78 | static void rocker_wait_destroy(struct rocker_wait *wait) |
4b8ac966 | 79 | { |
ca0a5f2a | 80 | kfree(wait); |
81 | } |
82 | ||
83 | static bool rocker_wait_event_timeout(struct rocker_wait *wait, | |
84 | unsigned long timeout) | |
85 | { | |
86 | wait_event_timeout(wait->wait, wait->done, timeout); | |
87 | if (!wait->done) | |
88 | return false; | |
89 | return true; | |
90 | } | |
91 | ||
92 | static void rocker_wait_wake_up(struct rocker_wait *wait) | |
93 | { | |
94 | wait->done = true; | |
95 | wake_up(&wait->wait); | |
96 | } | |
97 | ||
e5054643 | 98 | static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector) |
99 | { |
100 | return rocker->msix_entries[vector].vector; | |
101 | } | |
102 | ||
e5054643 | 103 | static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port) |
104 | { |
105 | return rocker_msix_vector(rocker_port->rocker, | |
106 | ROCKER_MSIX_VEC_TX(rocker_port->port_number)); | |
107 | } | |
108 | ||
e5054643 | 109 | static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port) |
110 | { |
111 | return rocker_msix_vector(rocker_port->rocker, | |
112 | ROCKER_MSIX_VEC_RX(rocker_port->port_number)); | |
113 | } | |
114 | ||
115 | #define rocker_write32(rocker, reg, val) \ | |
116 | writel((val), (rocker)->hw_addr + (ROCKER_ ## reg)) | |
117 | #define rocker_read32(rocker, reg) \ | |
118 | readl((rocker)->hw_addr + (ROCKER_ ## reg)) | |
119 | #define rocker_write64(rocker, reg, val) \ | |
120 | writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg)) | |
121 | #define rocker_read64(rocker, reg) \ | |
122 | readq((rocker)->hw_addr + (ROCKER_ ## reg)) | |
123 | ||
124 | /***************************** | |
125 | * HW basic testing functions | |
126 | *****************************/ | |
127 | ||
e5054643 | 128 | static int rocker_reg_test(const struct rocker *rocker) |
4b8ac966 | 129 | { |
e5054643 | 130 | const struct pci_dev *pdev = rocker->pdev; |
131 | u64 test_reg; |
132 | u64 rnd; | |
133 | ||
134 | rnd = prandom_u32(); | |
135 | rnd >>= 1; | |
136 | rocker_write32(rocker, TEST_REG, rnd); | |
137 | test_reg = rocker_read32(rocker, TEST_REG); | |
138 | if (test_reg != rnd * 2) { | |
139 | dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n", | |
140 | test_reg, rnd * 2); | |
141 | return -EIO; | |
142 | } | |
143 | ||
144 | rnd = prandom_u32(); | |
145 | rnd <<= 31; | |
146 | rnd |= prandom_u32(); | |
147 | rocker_write64(rocker, TEST_REG64, rnd); | |
148 | test_reg = rocker_read64(rocker, TEST_REG64); | |
149 | if (test_reg != rnd * 2) { | |
150 | dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n", | |
151 | test_reg, rnd * 2); | |
152 | return -EIO; | |
153 | } | |
154 | ||
155 | return 0; | |
156 | } | |
157 | ||
158 | static int rocker_dma_test_one(const struct rocker *rocker, |
159 | struct rocker_wait *wait, u32 test_type, | |
160 | dma_addr_t dma_handle, const unsigned char *buf, | |
161 | const unsigned char *expect, size_t size) | |
4b8ac966 | 162 | { |
e5054643 | 163 | const struct pci_dev *pdev = rocker->pdev; |
164 | int i; |
165 | ||
166 | rocker_wait_reset(wait); | |
167 | rocker_write32(rocker, TEST_DMA_CTRL, test_type); | |
168 | ||
169 | if (!rocker_wait_event_timeout(wait, HZ / 10)) { | |
170 | dev_err(&pdev->dev, "no interrupt received within a timeout\n"); | |
171 | return -EIO; | |
172 | } | |
173 | ||
174 | for (i = 0; i < size; i++) { | |
175 | if (buf[i] != expect[i]) { | |
176 | dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n", |
177 | buf[i], i, expect[i]); | |
178 | return -EIO; | |
179 | } | |
180 | } | |
181 | return 0; | |
182 | } | |
183 | ||
184 | #define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4) | |
185 | #define ROCKER_TEST_DMA_FILL_PATTERN 0x96 | |
186 | ||
e5054643 | 187 | static int rocker_dma_test_offset(const struct rocker *rocker, |
188 | struct rocker_wait *wait, int offset) |
189 | { | |
190 | struct pci_dev *pdev = rocker->pdev; | |
191 | unsigned char *alloc; | |
192 | unsigned char *buf; | |
193 | unsigned char *expect; | |
194 | dma_addr_t dma_handle; | |
195 | int i; | |
196 | int err; | |
197 | ||
198 | alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset, | |
199 | GFP_KERNEL | GFP_DMA); | |
200 | if (!alloc) | |
201 | return -ENOMEM; | |
202 | buf = alloc + offset; | |
203 | expect = buf + ROCKER_TEST_DMA_BUF_SIZE; | |
204 | ||
205 | dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE, | |
206 | PCI_DMA_BIDIRECTIONAL); | |
207 | if (pci_dma_mapping_error(pdev, dma_handle)) { | |
208 | err = -EIO; | |
209 | goto free_alloc; | |
210 | } | |
211 | ||
212 | rocker_write64(rocker, TEST_DMA_ADDR, dma_handle); | |
213 | rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE); | |
214 | ||
215 | memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE); | |
216 | err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL, | |
217 | dma_handle, buf, expect, | |
218 | ROCKER_TEST_DMA_BUF_SIZE); | |
219 | if (err) | |
220 | goto unmap; | |
221 | ||
222 | memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE); | |
223 | err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR, | |
224 | dma_handle, buf, expect, | |
225 | ROCKER_TEST_DMA_BUF_SIZE); | |
226 | if (err) | |
227 | goto unmap; | |
228 | ||
229 | prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE); | |
230 | for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++) | |
231 | expect[i] = ~buf[i]; | |
232 | err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT, | |
233 | dma_handle, buf, expect, | |
234 | ROCKER_TEST_DMA_BUF_SIZE); | |
235 | if (err) | |
236 | goto unmap; | |
237 | ||
238 | unmap: | |
239 | pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE, | |
240 | PCI_DMA_BIDIRECTIONAL); | |
241 | free_alloc: | |
242 | kfree(alloc); | |
243 | ||
244 | return err; | |
245 | } | |
246 | ||
247 | static int rocker_dma_test(const struct rocker *rocker, |
248 | struct rocker_wait *wait) | |
249 | { |
250 | int i; | |
251 | int err; | |
252 | ||
253 | for (i = 0; i < 8; i++) { | |
254 | err = rocker_dma_test_offset(rocker, wait, i); | |
255 | if (err) | |
256 | return err; | |
257 | } | |
258 | return 0; | |
259 | } | |
260 | ||
261 | static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id) | |
262 | { | |
263 | struct rocker_wait *wait = dev_id; | |
264 | ||
265 | rocker_wait_wake_up(wait); | |
266 | ||
267 | return IRQ_HANDLED; | |
268 | } | |
269 | ||
e5054643 | 270 | static int rocker_basic_hw_test(const struct rocker *rocker) |
4b8ac966 | 271 | { |
e5054643 | 272 | const struct pci_dev *pdev = rocker->pdev; |
273 | struct rocker_wait wait; |
274 | int err; | |
275 | ||
276 | err = rocker_reg_test(rocker); | |
277 | if (err) { | |
278 | dev_err(&pdev->dev, "reg test failed\n"); | |
279 | return err; | |
280 | } | |
281 | ||
282 | err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), | |
283 | rocker_test_irq_handler, 0, | |
284 | rocker_driver_name, &wait); | |
285 | if (err) { | |
286 | dev_err(&pdev->dev, "cannot assign test irq\n"); | |
287 | return err; | |
288 | } | |
289 | ||
290 | rocker_wait_init(&wait); | |
291 | rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST); | |
292 | ||
293 | if (!rocker_wait_event_timeout(&wait, HZ / 10)) { | |
294 | dev_err(&pdev->dev, "no interrupt received within a timeout\n"); | |
295 | err = -EIO; | |
296 | goto free_irq; | |
297 | } | |
298 | ||
299 | err = rocker_dma_test(rocker, &wait); | |
300 | if (err) | |
301 | dev_err(&pdev->dev, "dma test failed\n"); | |
302 | ||
303 | free_irq: | |
304 | free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait); | |
305 | return err; | |
306 | } | |
307 | ||
308 | /****************************************** |
309 | * DMA rings and descriptors manipulations | |
310 | ******************************************/ | |
311 | ||
312 | static u32 __pos_inc(u32 pos, size_t limit) | |
313 | { | |
314 | return ++pos == limit ? 0 : pos; | |
315 | } | |
316 | ||
e5054643 | 317 | static int rocker_desc_err(const struct rocker_desc_info *desc_info) |
4b8ac966 | 318 | { |
319 | int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN; |
320 | ||
321 | switch (err) { | |
322 | case ROCKER_OK: | |
323 | return 0; | |
324 | case -ROCKER_ENOENT: | |
325 | return -ENOENT; | |
326 | case -ROCKER_ENXIO: | |
327 | return -ENXIO; | |
328 | case -ROCKER_ENOMEM: | |
329 | return -ENOMEM; | |
330 | case -ROCKER_EEXIST: | |
331 | return -EEXIST; | |
332 | case -ROCKER_EINVAL: | |
333 | return -EINVAL; | |
334 | case -ROCKER_EMSGSIZE: | |
335 | return -EMSGSIZE; | |
336 | case -ROCKER_ENOTSUP: | |
337 | return -EOPNOTSUPP; | |
338 | case -ROCKER_ENOBUFS: | |
339 | return -ENOBUFS; | |
340 | } | |
341 | ||
342 | return -EINVAL; | |
343 | } |
344 | ||
e5054643 | 345 | static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info) |
346 | { |
347 | desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN; | |
348 | } | |
349 | ||
e5054643 | 350 | static bool rocker_desc_gen(const struct rocker_desc_info *desc_info) |
351 | { |
352 | u32 comp_err = desc_info->desc->comp_err; | |
353 | ||
354 | return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false; | |
355 | } | |
356 | ||
357 | static void * |
358 | rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info) | |
4b8ac966 | 359 | { |
adedf37b | 360 | return (void *)(uintptr_t)desc_info->desc->cookie; |
361 | } |
362 | ||
e5054643 | 363 | static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info, |
364 | void *ptr) |
365 | { | |
adedf37b | 366 | desc_info->desc->cookie = (uintptr_t) ptr; |
367 | } |
368 | ||
369 | static struct rocker_desc_info * | |
e5054643 | 370 | rocker_desc_head_get(const struct rocker_dma_ring_info *info) |
371 | { |
372 | struct rocker_desc_info *desc_info; | |
373 | u32 head = __pos_inc(info->head, info->size); | |
374 | ||
375 | desc_info = &info->desc_info[info->head]; | |
376 | if (head == info->tail) | |
377 | return NULL; /* ring full */ | |
378 | desc_info->tlv_size = 0; | |
379 | return desc_info; | |
380 | } | |
381 | ||
e5054643 | 382 | static void rocker_desc_commit(const struct rocker_desc_info *desc_info) |
383 | { |
384 | desc_info->desc->buf_size = desc_info->data_size; | |
385 | desc_info->desc->tlv_size = desc_info->tlv_size; | |
386 | } | |
387 | ||
e5054643 | 388 | static void rocker_desc_head_set(const struct rocker *rocker, |
4b8ac966 | 389 | struct rocker_dma_ring_info *info, |
e5054643 | 390 | const struct rocker_desc_info *desc_info) |
391 | { |
392 | u32 head = __pos_inc(info->head, info->size); | |
393 | ||
394 | BUG_ON(head == info->tail); | |
395 | rocker_desc_commit(desc_info); | |
396 | info->head = head; | |
397 | rocker_write32(rocker, DMA_DESC_HEAD(info->type), head); | |
398 | } | |
399 | ||
400 | static struct rocker_desc_info * | |
401 | rocker_desc_tail_get(struct rocker_dma_ring_info *info) | |
402 | { | |
403 | struct rocker_desc_info *desc_info; | |
404 | ||
405 | if (info->tail == info->head) | |
406 | return NULL; /* nothing to be done between head and tail */ | |
407 | desc_info = &info->desc_info[info->tail]; | |
408 | if (!rocker_desc_gen(desc_info)) | |
409 | return NULL; /* gen bit not set, desc is not ready yet */ | |
410 | info->tail = __pos_inc(info->tail, info->size); | |
411 | desc_info->tlv_size = desc_info->desc->tlv_size; | |
412 | return desc_info; | |
413 | } | |
414 | ||
415 | static void rocker_dma_ring_credits_set(const struct rocker *rocker, |
416 | const struct rocker_dma_ring_info *info, | |
417 | u32 credits) |
418 | { | |
419 | if (credits) | |
420 | rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits); | |
421 | } | |
422 | ||
423 | static unsigned long rocker_dma_ring_size_fix(size_t size) | |
424 | { | |
425 | return max(ROCKER_DMA_SIZE_MIN, | |
426 | min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX)); | |
427 | } | |
428 | ||
e5054643 | 429 | static int rocker_dma_ring_create(const struct rocker *rocker, |
430 | unsigned int type, |
431 | size_t size, | |
432 | struct rocker_dma_ring_info *info) | |
433 | { | |
434 | int i; | |
435 | ||
436 | BUG_ON(size != rocker_dma_ring_size_fix(size)); | |
437 | info->size = size; | |
438 | info->type = type; | |
439 | info->head = 0; | |
440 | info->tail = 0; | |
441 | info->desc_info = kcalloc(info->size, sizeof(*info->desc_info), | |
442 | GFP_KERNEL); | |
443 | if (!info->desc_info) | |
444 | return -ENOMEM; | |
445 | ||
446 | info->desc = pci_alloc_consistent(rocker->pdev, | |
447 | info->size * sizeof(*info->desc), | |
448 | &info->mapaddr); | |
449 | if (!info->desc) { | |
450 | kfree(info->desc_info); | |
451 | return -ENOMEM; | |
452 | } | |
453 | ||
454 | for (i = 0; i < info->size; i++) | |
455 | info->desc_info[i].desc = &info->desc[i]; | |
456 | ||
457 | rocker_write32(rocker, DMA_DESC_CTRL(info->type), | |
458 | ROCKER_DMA_DESC_CTRL_RESET); | |
459 | rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr); | |
460 | rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size); | |
461 | ||
462 | return 0; | |
463 | } | |
464 | ||
465 | static void rocker_dma_ring_destroy(const struct rocker *rocker, |
466 | const struct rocker_dma_ring_info *info) | |
467 | { |
468 | rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0); | |
469 | ||
470 | pci_free_consistent(rocker->pdev, | |
471 | info->size * sizeof(struct rocker_desc), | |
472 | info->desc, info->mapaddr); | |
473 | kfree(info->desc_info); | |
474 | } | |
475 | ||
e5054643 | 476 | static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker, |
477 | struct rocker_dma_ring_info *info) |
478 | { | |
479 | int i; | |
480 | ||
481 | BUG_ON(info->head || info->tail); | |
482 | ||
483 | /* When the ring is a consumer, we need to advance the head for each desc. |
484 | * That tells the hw that the desc is ready to be used by it. |
485 | */ | |
486 | for (i = 0; i < info->size - 1; i++) | |
487 | rocker_desc_head_set(rocker, info, &info->desc_info[i]); | |
488 | rocker_desc_commit(&info->desc_info[i]); | |
489 | } | |
490 | ||
491 | static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker, |
492 | const struct rocker_dma_ring_info *info, | |
493 | int direction, size_t buf_size) |
494 | { | |
495 | struct pci_dev *pdev = rocker->pdev; | |
496 | int i; | |
497 | int err; | |
498 | ||
499 | for (i = 0; i < info->size; i++) { | |
500 | struct rocker_desc_info *desc_info = &info->desc_info[i]; | |
501 | struct rocker_desc *desc = &info->desc[i]; | |
502 | dma_addr_t dma_handle; | |
503 | char *buf; | |
504 | ||
505 | buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA); | |
506 | if (!buf) { | |
507 | err = -ENOMEM; | |
508 | goto rollback; | |
509 | } | |
510 | ||
511 | dma_handle = pci_map_single(pdev, buf, buf_size, direction); | |
512 | if (pci_dma_mapping_error(pdev, dma_handle)) { | |
513 | kfree(buf); | |
514 | err = -EIO; | |
515 | goto rollback; | |
516 | } | |
517 | ||
518 | desc_info->data = buf; | |
519 | desc_info->data_size = buf_size; | |
520 | dma_unmap_addr_set(desc_info, mapaddr, dma_handle); | |
521 | ||
522 | desc->buf_addr = dma_handle; | |
523 | desc->buf_size = buf_size; | |
524 | } | |
525 | return 0; | |
526 | ||
527 | rollback: | |
528 | for (i--; i >= 0; i--) { | |
e5054643 | 529 | const struct rocker_desc_info *desc_info = &info->desc_info[i]; |
530 | |
531 | pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), | |
532 | desc_info->data_size, direction); | |
533 | kfree(desc_info->data); | |
534 | } | |
535 | return err; | |
536 | } | |
537 | ||
538 | static void rocker_dma_ring_bufs_free(const struct rocker *rocker, |
539 | const struct rocker_dma_ring_info *info, | |
540 | int direction) |
541 | { | |
542 | struct pci_dev *pdev = rocker->pdev; | |
543 | int i; | |
544 | ||
545 | for (i = 0; i < info->size; i++) { | |
e5054643 | 546 | const struct rocker_desc_info *desc_info = &info->desc_info[i]; |
547 | struct rocker_desc *desc = &info->desc[i]; |
548 | ||
549 | desc->buf_addr = 0; | |
550 | desc->buf_size = 0; | |
551 | pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), | |
552 | desc_info->data_size, direction); | |
553 | kfree(desc_info->data); | |
554 | } | |
555 | } | |
556 | ||
557 | static int rocker_dma_cmd_ring_wait_alloc(struct rocker_desc_info *desc_info) |
558 | { | |
559 | struct rocker_wait *wait; | |
560 | ||
561 | wait = rocker_wait_create(); | |
562 | if (!wait) | |
563 | return -ENOMEM; | |
564 | rocker_desc_cookie_ptr_set(desc_info, wait); | |
565 | return 0; | |
566 | } | |
567 | ||
568 | static void | |
569 | rocker_dma_cmd_ring_wait_free(const struct rocker_desc_info *desc_info) | |
570 | { | |
571 | struct rocker_wait *wait = rocker_desc_cookie_ptr_get(desc_info); | |
572 | ||
573 | rocker_wait_destroy(wait); | |
574 | } | |
575 | ||
576 | static int rocker_dma_cmd_ring_waits_alloc(const struct rocker *rocker) | |
577 | { | |
578 | const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring; | |
579 | int i; | |
580 | int err; | |
581 | ||
582 | for (i = 0; i < cmd_ring->size; i++) { | |
583 | err = rocker_dma_cmd_ring_wait_alloc(&cmd_ring->desc_info[i]); | |
584 | if (err) | |
585 | goto rollback; | |
586 | } | |
587 | return 0; | |
588 | ||
589 | rollback: | |
590 | for (i--; i >= 0; i--) | |
591 | rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]); | |
592 | return err; | |
593 | } | |
594 | ||
595 | static void rocker_dma_cmd_ring_waits_free(const struct rocker *rocker) | |
596 | { | |
597 | const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring; | |
598 | int i; | |
599 | ||
600 | for (i = 0; i < cmd_ring->size; i++) | |
601 | rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]); | |
602 | } | |
603 | ||
604 | static int rocker_dma_rings_init(struct rocker *rocker) |
605 | { | |
e5054643 | 606 | const struct pci_dev *pdev = rocker->pdev; |
607 | int err; |
608 | ||
609 | err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD, | |
610 | ROCKER_DMA_CMD_DEFAULT_SIZE, | |
611 | &rocker->cmd_ring); | |
612 | if (err) { | |
613 | dev_err(&pdev->dev, "failed to create command dma ring\n"); | |
614 | return err; | |
615 | } | |
616 | ||
617 | spin_lock_init(&rocker->cmd_ring_lock); | |
618 | ||
619 | err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring, | |
620 | PCI_DMA_BIDIRECTIONAL, PAGE_SIZE); | |
621 | if (err) { | |
622 | dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n"); | |
623 | goto err_dma_cmd_ring_bufs_alloc; | |
624 | } | |
625 | ||
626 | err = rocker_dma_cmd_ring_waits_alloc(rocker); |
627 | if (err) { | |
628 | dev_err(&pdev->dev, "failed to alloc command dma ring waits\n"); | |
629 | goto err_dma_cmd_ring_waits_alloc; | |
630 | } | |
631 | ||
632 | err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT, |
633 | ROCKER_DMA_EVENT_DEFAULT_SIZE, | |
634 | &rocker->event_ring); | |
635 | if (err) { | |
636 | dev_err(&pdev->dev, "failed to create event dma ring\n"); | |
637 | goto err_dma_event_ring_create; | |
638 | } | |
639 | ||
640 | err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring, | |
641 | PCI_DMA_FROMDEVICE, PAGE_SIZE); | |
642 | if (err) { | |
643 | dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n"); | |
644 | goto err_dma_event_ring_bufs_alloc; | |
645 | } | |
646 | rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring); | |
647 | return 0; | |
648 | ||
649 | err_dma_event_ring_bufs_alloc: | |
650 | rocker_dma_ring_destroy(rocker, &rocker->event_ring); | |
651 | err_dma_event_ring_create: | |
652 | rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, | |
653 | PCI_DMA_BIDIRECTIONAL); | |
654 | err_dma_cmd_ring_waits_alloc: |
655 | rocker_dma_cmd_ring_waits_free(rocker); | |
656 | err_dma_cmd_ring_bufs_alloc: |
657 | rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); | |
658 | return err; | |
659 | } | |
660 | ||
661 | static void rocker_dma_rings_fini(struct rocker *rocker) | |
662 | { | |
663 | rocker_dma_ring_bufs_free(rocker, &rocker->event_ring, | |
664 | PCI_DMA_BIDIRECTIONAL); | |
665 | rocker_dma_ring_destroy(rocker, &rocker->event_ring); | |
ca0a5f2a | 666 | rocker_dma_cmd_ring_waits_free(rocker); |
667 | rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, |
668 | PCI_DMA_BIDIRECTIONAL); | |
669 | rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); | |
670 | } | |
671 | ||
534ba6a8 | 672 | static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port, |
673 | struct rocker_desc_info *desc_info, |
674 | struct sk_buff *skb, size_t buf_len) | |
675 | { | |
534ba6a8 | 676 | const struct rocker *rocker = rocker_port->rocker; |
677 | struct pci_dev *pdev = rocker->pdev; |
678 | dma_addr_t dma_handle; | |
679 | ||
680 | dma_handle = pci_map_single(pdev, skb->data, buf_len, | |
681 | PCI_DMA_FROMDEVICE); | |
682 | if (pci_dma_mapping_error(pdev, dma_handle)) | |
683 | return -EIO; | |
684 | if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle)) | |
685 | goto tlv_put_failure; | |
686 | if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len)) | |
687 | goto tlv_put_failure; | |
688 | return 0; | |
689 | ||
690 | tlv_put_failure: | |
691 | pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE); | |
692 | desc_info->tlv_size = 0; | |
693 | return -EMSGSIZE; | |
694 | } | |
695 | ||
e5054643 | 696 | static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port) |
697 | { |
698 | return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; | |
699 | } | |
700 | ||
534ba6a8 | 701 | static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port, |
702 | struct rocker_desc_info *desc_info) |
703 | { | |
704 | struct net_device *dev = rocker_port->dev; | |
705 | struct sk_buff *skb; | |
706 | size_t buf_len = rocker_port_rx_buf_len(rocker_port); | |
707 | int err; | |
708 | ||
709 | /* Ensure that hw will see tlv_size zero in case of an error. | |
710 | * That tells hw to use another descriptor. | |
711 | */ | |
712 | rocker_desc_cookie_ptr_set(desc_info, NULL); | |
713 | desc_info->tlv_size = 0; | |
714 | ||
715 | skb = netdev_alloc_skb_ip_align(dev, buf_len); | |
716 | if (!skb) | |
717 | return -ENOMEM; | |
534ba6a8 | 718 | err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len); |
719 | if (err) { |
720 | dev_kfree_skb_any(skb); | |
721 | return err; | |
722 | } | |
723 | rocker_desc_cookie_ptr_set(desc_info, skb); | |
724 | return 0; | |
725 | } | |
726 | ||
727 | static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker, |
728 | const struct rocker_tlv **attrs) | |
729 | { |
730 | struct pci_dev *pdev = rocker->pdev; | |
731 | dma_addr_t dma_handle; | |
732 | size_t len; | |
733 | ||
734 | if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] || | |
735 | !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]) | |
736 | return; | |
737 | dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]); | |
738 | len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]); | |
739 | pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE); | |
740 | } | |
741 | ||
742 | static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker, |
743 | const struct rocker_desc_info *desc_info) | |
4b8ac966 | 744 | { |
e5054643 | 745 | const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; |
746 | struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); |
747 | ||
748 | if (!skb) | |
749 | return; | |
750 | rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); | |
751 | rocker_dma_rx_ring_skb_unmap(rocker, attrs); | |
752 | dev_kfree_skb_any(skb); | |
753 | } | |
754 | ||
534ba6a8 | 755 | static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port) |
4b8ac966 | 756 | { |
e5054643 | 757 | const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; |
534ba6a8 | 758 | const struct rocker *rocker = rocker_port->rocker; |
759 | int i; |
760 | int err; | |
761 | ||
762 | for (i = 0; i < rx_ring->size; i++) { | |
534ba6a8 | 763 | err = rocker_dma_rx_ring_skb_alloc(rocker_port, |
4b8ac966 JP |
764 | &rx_ring->desc_info[i]); |
765 | if (err) | |
766 | goto rollback; | |
767 | } | |
768 | return 0; | |
769 | ||
770 | rollback: | |
771 | for (i--; i >= 0; i--) | |
772 | rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); | |
773 | return err; | |
774 | } | |
775 | ||
534ba6a8 | 776 | static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port) |
4b8ac966 | 777 | { |
e5054643 | 778 | const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; |
534ba6a8 | 779 | const struct rocker *rocker = rocker_port->rocker; |
780 | int i; |
781 | ||
782 | for (i = 0; i < rx_ring->size; i++) | |
783 | rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); | |
784 | } | |
785 | ||
786 | static int rocker_port_dma_rings_init(struct rocker_port *rocker_port) | |
787 | { | |
788 | struct rocker *rocker = rocker_port->rocker; | |
789 | int err; | |
790 | ||
791 | err = rocker_dma_ring_create(rocker, | |
792 | ROCKER_DMA_TX(rocker_port->port_number), | |
793 | ROCKER_DMA_TX_DEFAULT_SIZE, | |
794 | &rocker_port->tx_ring); | |
795 | if (err) { | |
796 | netdev_err(rocker_port->dev, "failed to create tx dma ring\n"); | |
797 | return err; | |
798 | } | |
799 | ||
800 | err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring, | |
801 | PCI_DMA_TODEVICE, | |
802 | ROCKER_DMA_TX_DESC_SIZE); | |
803 | if (err) { | |
804 | netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n"); | |
805 | goto err_dma_tx_ring_bufs_alloc; | |
806 | } | |
807 | ||
808 | err = rocker_dma_ring_create(rocker, | |
809 | ROCKER_DMA_RX(rocker_port->port_number), | |
810 | ROCKER_DMA_RX_DEFAULT_SIZE, | |
811 | &rocker_port->rx_ring); | |
812 | if (err) { | |
813 | netdev_err(rocker_port->dev, "failed to create rx dma ring\n"); | |
814 | goto err_dma_rx_ring_create; | |
815 | } | |
816 | ||
817 | err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring, | |
818 | PCI_DMA_BIDIRECTIONAL, | |
819 | ROCKER_DMA_RX_DESC_SIZE); | |
820 | if (err) { | |
821 | netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n"); | |
822 | goto err_dma_rx_ring_bufs_alloc; | |
823 | } | |
824 | ||
534ba6a8 | 825 | err = rocker_dma_rx_ring_skbs_alloc(rocker_port); |
826 | if (err) { |
827 | netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n"); | |
828 | goto err_dma_rx_ring_skbs_alloc; | |
829 | } | |
830 | rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring); | |
831 | ||
832 | return 0; | |
833 | ||
834 | err_dma_rx_ring_skbs_alloc: | |
835 | rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, | |
836 | PCI_DMA_BIDIRECTIONAL); | |
837 | err_dma_rx_ring_bufs_alloc: | |
838 | rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); | |
839 | err_dma_rx_ring_create: | |
840 | rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, | |
841 | PCI_DMA_TODEVICE); | |
842 | err_dma_tx_ring_bufs_alloc: | |
843 | rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); | |
844 | return err; | |
845 | } | |
846 | ||
847 | static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port) | |
848 | { | |
849 | struct rocker *rocker = rocker_port->rocker; | |
850 | ||
534ba6a8 | 851 | rocker_dma_rx_ring_skbs_free(rocker_port); |
852 | rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, |
853 | PCI_DMA_BIDIRECTIONAL); | |
854 | rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); | |
855 | rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, | |
856 | PCI_DMA_TODEVICE); | |
857 | rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); | |
858 | } | |
859 | ||
860 | static void rocker_port_set_enable(const struct rocker_port *rocker_port, |
861 | bool enable) | |
862 | { |
863 | u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); | |
864 | ||
865 | if (enable) | |
71a83a6d | 866 | val |= 1ULL << rocker_port->pport; |
4b8ac966 | 867 | else |
71a83a6d | 868 | val &= ~(1ULL << rocker_port->pport); |
869 | rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); |
870 | } | |
871 | ||
872 | /******************************** | |
873 | * Interrupt handler and helpers | |
874 | ********************************/ | |
875 | ||
876 | static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id) | |
877 | { | |
878 | struct rocker *rocker = dev_id; | |
e5054643 | 879 | const struct rocker_desc_info *desc_info; |
880 | struct rocker_wait *wait; |
881 | u32 credits = 0; | |
882 | ||
883 | spin_lock(&rocker->cmd_ring_lock); | |
884 | while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) { | |
885 | wait = rocker_desc_cookie_ptr_get(desc_info); | |
886 | if (wait->nowait) { |
887 | rocker_desc_gen_clear(desc_info); | |
888 | } else { |
889 | rocker_wait_wake_up(wait); | |
890 | } | |
891 | credits++; |
892 | } | |
893 | spin_unlock(&rocker->cmd_ring_lock); | |
894 | rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits); | |
895 | ||
896 | return IRQ_HANDLED; | |
897 | } | |
898 | ||
e5054643 | 899 | static void rocker_port_link_up(const struct rocker_port *rocker_port) |
900 | { |
901 | netif_carrier_on(rocker_port->dev); | |
902 | netdev_info(rocker_port->dev, "Link is up\n"); | |
903 | } | |
904 | ||
e5054643 | 905 | static void rocker_port_link_down(const struct rocker_port *rocker_port) |
906 | { |
907 | netif_carrier_off(rocker_port->dev); | |
908 | netdev_info(rocker_port->dev, "Link is down\n"); | |
909 | } | |
910 | ||
e5054643 | 911 | static int rocker_event_link_change(const struct rocker *rocker, |
912 | const struct rocker_tlv *info) |
913 | { | |
e5054643 | 914 | const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1]; |
915 | unsigned int port_number; |
916 | bool link_up; | |
917 | struct rocker_port *rocker_port; | |
918 | ||
919 | rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info); | |
4a6bb6d3 | 920 | if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] || |
921 | !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]) |
922 | return -EIO; | |
923 | port_number = | |
4a6bb6d3 | 924 | rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1; |
925 | link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]); |
926 | ||
927 | if (port_number >= rocker->port_count) | |
928 | return -EINVAL; | |
929 | ||
930 | rocker_port = rocker->ports[port_number]; | |
931 | if (netif_carrier_ok(rocker_port->dev) != link_up) { | |
932 | if (link_up) | |
933 | rocker_port_link_up(rocker_port); | |
934 | else | |
935 | rocker_port_link_down(rocker_port); | |
936 | } | |
937 | ||
938 | return 0; | |
939 | } | |
940 | ||
941 | static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port, |
942 | const unsigned char *addr, | |
943 | __be16 vlan_id); | |
6c707945 | 944 | |
e5054643 | 945 | static int rocker_event_mac_vlan_seen(const struct rocker *rocker, |
946 | const struct rocker_tlv *info) |
947 | { | |
e5054643 | 948 | const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1]; |
949 | unsigned int port_number; |
950 | struct rocker_port *rocker_port; | |
e5054643 | 951 | const unsigned char *addr; |
952 | __be16 vlan_id; |
953 | ||
954 | rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info); | |
4a6bb6d3 | 955 | if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] || |
956 | !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] || |
957 | !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]) | |
958 | return -EIO; | |
959 | port_number = | |
4a6bb6d3 | 960 | rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1; |
6c707945 | 961 | addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]); |
9b03c71f | 962 | vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]); |
963 | |
964 | if (port_number >= rocker->port_count) | |
965 | return -EINVAL; | |
966 | ||
967 | rocker_port = rocker->ports[port_number]; | |
3fbcdbf3 | 968 | return rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id); |
6c707945 | 969 | } |
9f6bbf7c | 970 | |
971 | static int rocker_event_process(const struct rocker *rocker, |
972 | const struct rocker_desc_info *desc_info) | |
4b8ac966 | 973 | { |
974 | const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1]; |
975 | const struct rocker_tlv *info; | |
976 | u16 type; |
977 | ||
978 | rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info); | |
979 | if (!attrs[ROCKER_TLV_EVENT_TYPE] || | |
980 | !attrs[ROCKER_TLV_EVENT_INFO]) | |
981 | return -EIO; | |
982 | ||
983 | type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]); | |
984 | info = attrs[ROCKER_TLV_EVENT_INFO]; | |
985 | ||
986 | switch (type) { | |
987 | case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED: | |
988 | return rocker_event_link_change(rocker, info); | |
989 | case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN: |
990 | return rocker_event_mac_vlan_seen(rocker, info); | |
991 | } |
992 | ||
993 | return -EOPNOTSUPP; | |
994 | } | |
995 | ||
996 | static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id) | |
997 | { | |
998 | struct rocker *rocker = dev_id; | |
999 | const struct pci_dev *pdev = rocker->pdev; |
1000 | const struct rocker_desc_info *desc_info; | |
1001 | u32 credits = 0; |
1002 | int err; | |
1003 | ||
1004 | while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) { | |
1005 | err = rocker_desc_err(desc_info); | |
1006 | if (err) { | |
1007 | dev_err(&pdev->dev, "event desc received with err %d\n", | |
1008 | err); | |
1009 | } else { | |
1010 | err = rocker_event_process(rocker, desc_info); | |
1011 | if (err) | |
1012 | dev_err(&pdev->dev, "event processing failed with err %d\n", | |
1013 | err); | |
1014 | } | |
1015 | rocker_desc_gen_clear(desc_info); | |
1016 | rocker_desc_head_set(rocker, &rocker->event_ring, desc_info); | |
1017 | credits++; | |
1018 | } | |
1019 | rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits); | |
1020 | ||
1021 | return IRQ_HANDLED; | |
1022 | } | |
1023 | ||
1024 | static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id) | |
1025 | { | |
1026 | struct rocker_port *rocker_port = dev_id; | |
1027 | ||
1028 | napi_schedule(&rocker_port->napi_tx); | |
1029 | return IRQ_HANDLED; | |
1030 | } | |
1031 | ||
1032 | static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id) | |
1033 | { | |
1034 | struct rocker_port *rocker_port = dev_id; | |
1035 | ||
1036 | napi_schedule(&rocker_port->napi_rx); | |
1037 | return IRQ_HANDLED; | |
1038 | } | |
1039 | ||
1040 | /******************** | |
1041 | * Command interface | |
1042 | ********************/ | |
1043 | ||
1044 | int rocker_cmd_exec(struct rocker_port *rocker_port, bool nowait, |
1045 | rocker_cmd_prep_cb_t prepare, void *prepare_priv, | |
1046 | rocker_cmd_proc_cb_t process, void *process_priv) | |
4b8ac966 | 1047 | { |
534ba6a8 | 1048 | struct rocker *rocker = rocker_port->rocker; |
1049 | struct rocker_desc_info *desc_info; |
1050 | struct rocker_wait *wait; | |
179f9a25 | 1051 | unsigned long lock_flags; |
1052 | int err; |
1053 | ||
179f9a25 | 1054 | spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags); |
c4f20321 | 1055 | |
1056 | desc_info = rocker_desc_head_get(&rocker->cmd_ring); |
1057 | if (!desc_info) { | |
179f9a25 | 1058 | spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); |
ca0a5f2a | 1059 | return -EAGAIN; |
4b8ac966 | 1060 | } |
c4f20321 | 1061 | |
1062 | wait = rocker_desc_cookie_ptr_get(desc_info); |
1063 | rocker_wait_init(wait); | |
1064 | wait->nowait = nowait; | |
1065 | ||
534ba6a8 | 1066 | err = prepare(rocker_port, desc_info, prepare_priv); |
4b8ac966 | 1067 | if (err) { |
179f9a25 | 1068 | spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); |
ca0a5f2a | 1069 | return err; |
4b8ac966 | 1070 | } |
c4f20321 | 1071 | |
ae3907ec | 1072 | rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info); |
4b8ac966 | 1073 | |
1074 | spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); |
1075 | ||
1076 | if (nowait) | |
1077 | return 0; | |
c4f20321 | 1078 | |
1079 | if (!rocker_wait_event_timeout(wait, HZ / 10)) |
1080 | return -EIO; | |
1081 | |
1082 | err = rocker_desc_err(desc_info); | |
1083 | if (err) | |
1084 | return err; | |
1085 | ||
1086 | if (process) | |
534ba6a8 | 1087 | err = process(rocker_port, desc_info, process_priv); |
1088 | |
1089 | rocker_desc_gen_clear(desc_info); | |
1090 | return err; |
1091 | } | |
1092 | ||
1093 | static int | |
534ba6a8 | 1094 | rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port, |
1095 | struct rocker_desc_info *desc_info, |
1096 | void *priv) | |
1097 | { | |
1098 | struct rocker_tlv *cmd_info; | |
1099 | ||
1100 | if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, | |
1101 | ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS)) | |
1102 | return -EMSGSIZE; | |
1103 | cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); | |
1104 | if (!cmd_info) | |
1105 | return -EMSGSIZE; | |
1106 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, |
1107 | rocker_port->pport)) | |
1108 | return -EMSGSIZE; |
1109 | rocker_tlv_nest_end(desc_info, cmd_info); | |
1110 | return 0; | |
1111 | } | |
1112 | ||
1113 | static int | |
534ba6a8 | 1114 | rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port, |
e5054643 | 1115 | const struct rocker_desc_info *desc_info, |
1116 | void *priv) |
1117 | { | |
1118 | struct ethtool_cmd *ecmd = priv; | |
1119 | const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; |
1120 | const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; | |
1121 | u32 speed; |
1122 | u8 duplex; | |
1123 | u8 autoneg; | |
1124 | ||
1125 | rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); | |
1126 | if (!attrs[ROCKER_TLV_CMD_INFO]) | |
1127 | return -EIO; | |
1128 | ||
1129 | rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, | |
1130 | attrs[ROCKER_TLV_CMD_INFO]); | |
1131 | if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] || | |
1132 | !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] || | |
1133 | !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]) | |
1134 | return -EIO; | |
1135 | ||
1136 | speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]); | |
1137 | duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]); | |
1138 | autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]); | |
1139 | ||
1140 | ecmd->transceiver = XCVR_INTERNAL; | |
1141 | ecmd->supported = SUPPORTED_TP; | |
1142 | ecmd->phy_address = 0xff; | |
1143 | ecmd->port = PORT_TP; | |
1144 | ethtool_cmd_speed_set(ecmd, speed); | |
1145 | ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF; | |
1146 | ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; | |
1147 | ||
1148 | return 0; | |
1149 | } | |
1150 | ||
1151 | static int | |
534ba6a8 | 1152 | rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port, |
e5054643 | 1153 | const struct rocker_desc_info *desc_info, |
1154 | void *priv) |
1155 | { | |
1156 | unsigned char *macaddr = priv; | |
1157 | const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; |
1158 | const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; | |
1159 | const struct rocker_tlv *attr; | |
1160 | |
1161 | rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); | |
1162 | if (!attrs[ROCKER_TLV_CMD_INFO]) | |
1163 | return -EIO; | |
1164 | ||
1165 | rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, | |
1166 | attrs[ROCKER_TLV_CMD_INFO]); | |
1167 | attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]; | |
1168 | if (!attr) | |
1169 | return -EIO; | |
1170 | ||
1171 | if (rocker_tlv_len(attr) != ETH_ALEN) | |
1172 | return -EINVAL; | |
1173 | ||
1174 | ether_addr_copy(macaddr, rocker_tlv_data(attr)); | |
1175 | return 0; | |
1176 | } | |
1177 | ||
1178 | static int |
1179 | rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port, | |
1180 | const struct rocker_desc_info *desc_info, | |
1181 | void *priv) | |
1182 | { | |
1183 | u8 *p_mode = priv; | |
1184 | const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; | |
1185 | const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; | |
1186 | const struct rocker_tlv *attr; | |
1187 | ||
1188 | rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); | |
1189 | if (!attrs[ROCKER_TLV_CMD_INFO]) | |
1190 | return -EIO; | |
1191 | ||
1192 | rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, | |
1193 | attrs[ROCKER_TLV_CMD_INFO]); | |
1194 | attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]; | |
1195 | if (!attr) | |
1196 | return -EIO; | |
1197 | ||
1198 | *p_mode = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]); | |
1199 | return 0; | |
1200 | } | |
1201 | ||
1202 | struct port_name { |
1203 | char *buf; | |
1204 | size_t len; | |
1205 | }; | |
1206 | ||
1207 | static int | |
534ba6a8 | 1208 | rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port, |
e5054643 | 1209 | const struct rocker_desc_info *desc_info, |
1210 | void *priv) |
1211 | { | |
1212 | const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; |
1213 | const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; | |
db19170b | 1214 | struct port_name *name = priv; |
e5054643 | 1215 | const struct rocker_tlv *attr; |
db19170b | 1216 | size_t i, j, len; |
e5054643 | 1217 | const char *str; |
1218 | |
1219 | rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); | |
1220 | if (!attrs[ROCKER_TLV_CMD_INFO]) | |
1221 | return -EIO; | |
1222 | ||
1223 | rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, | |
1224 | attrs[ROCKER_TLV_CMD_INFO]); | |
1225 | attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME]; | |
1226 | if (!attr) | |
1227 | return -EIO; | |
1228 | ||
1229 | len = min_t(size_t, rocker_tlv_len(attr), name->len); | |
1230 | str = rocker_tlv_data(attr); | |
1231 | ||
1232 | /* make sure name only contains alphanumeric characters */ | |
1233 | for (i = j = 0; i < len; ++i) { | |
1234 | if (isalnum(str[i])) { | |
1235 | name->buf[j] = str[i]; | |
1236 | j++; | |
1237 | } | |
1238 | } | |
1239 | ||
1240 | if (j == 0) | |
1241 | return -EIO; | |
1242 | ||
1243 | name->buf[j] = '\0'; | |
1244 | ||
1245 | return 0; | |
1246 | } | |
1247 | ||
4b8ac966 | 1248 | static int |
534ba6a8 | 1249 | rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port, |
1250 | struct rocker_desc_info *desc_info, |
1251 | void *priv) | |
1252 | { | |
1253 | struct ethtool_cmd *ecmd = priv; | |
1254 | struct rocker_tlv *cmd_info; | |
1255 | ||
1256 | if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, | |
1257 | ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) | |
1258 | return -EMSGSIZE; | |
1259 | cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); | |
1260 | if (!cmd_info) | |
1261 | return -EMSGSIZE; | |
1262 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, |
1263 | rocker_port->pport)) | |
1264 | return -EMSGSIZE; |
1265 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, | |
1266 | ethtool_cmd_speed(ecmd))) | |
1267 | return -EMSGSIZE; | |
1268 | if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, | |
1269 | ecmd->duplex)) | |
1270 | return -EMSGSIZE; | |
1271 | if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, | |
1272 | ecmd->autoneg)) | |
1273 | return -EMSGSIZE; | |
1274 | rocker_tlv_nest_end(desc_info, cmd_info); | |
1275 | return 0; | |
1276 | } | |
1277 | ||
1278 | static int | |
534ba6a8 | 1279 | rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port, |
1280 | struct rocker_desc_info *desc_info, |
1281 | void *priv) | |
1282 | { | |
e5054643 | 1283 | const unsigned char *macaddr = priv; |
1284 | struct rocker_tlv *cmd_info; |
1285 | ||
1286 | if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, | |
1287 | ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) | |
1288 | return -EMSGSIZE; | |
1289 | cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); | |
1290 | if (!cmd_info) | |
1291 | return -EMSGSIZE; | |
1292 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, |
1293 | rocker_port->pport)) | |
1294 | return -EMSGSIZE; |
1295 | if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, | |
1296 | ETH_ALEN, macaddr)) | |
1297 | return -EMSGSIZE; | |
1298 | rocker_tlv_nest_end(desc_info, cmd_info); | |
1299 | return 0; | |
1300 | } | |
1301 | ||
1302 | static int |
1303 | rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port, | |
1304 | struct rocker_desc_info *desc_info, | |
1305 | void *priv) | |
1306 | { | |
1307 | int mtu = *(int *)priv; | |
1308 | struct rocker_tlv *cmd_info; | |
1309 | ||
1310 | if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, | |
1311 | ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) | |
1312 | return -EMSGSIZE; | |
1313 | cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); | |
1314 | if (!cmd_info) | |
1315 | return -EMSGSIZE; | |
1316 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, | |
1317 | rocker_port->pport)) | |
1318 | return -EMSGSIZE; | |
1319 | if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU, | |
1320 | mtu)) | |
1321 | return -EMSGSIZE; | |
1322 | rocker_tlv_nest_end(desc_info, cmd_info); | |
1323 | return 0; | |
1324 | } | |
1325 | ||
5111f80c | 1326 | static int |
534ba6a8 | 1327 | rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port, |
1328 | struct rocker_desc_info *desc_info, |
1329 | void *priv) | |
1330 | { | |
c1fe922e | 1331 | bool learning = *(bool *)priv; |
1332 | struct rocker_tlv *cmd_info; |
1333 | ||
1334 | if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, | |
1335 | ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) | |
1336 | return -EMSGSIZE; | |
1337 | cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); | |
1338 | if (!cmd_info) | |
1339 | return -EMSGSIZE; | |
1340 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, |
1341 | rocker_port->pport)) | |
1342 | return -EMSGSIZE; |
1343 | if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, | |
c1fe922e | 1344 | learning)) |
1345 | return -EMSGSIZE; |
1346 | rocker_tlv_nest_end(desc_info, cmd_info); | |
1347 | return 0; | |
1348 | } | |
1349 | ||
1350 | static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, |
1351 | struct ethtool_cmd *ecmd) | |
1352 | { | |
53901cc0 | 1353 | return rocker_cmd_exec(rocker_port, false, |
1354 | rocker_cmd_get_port_settings_prep, NULL, |
1355 | rocker_cmd_get_port_settings_ethtool_proc, | |
c4f20321 | 1356 | ecmd); |
1357 | } |
1358 | ||
1359 | static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, | |
1360 | unsigned char *macaddr) | |
1361 | { | |
53901cc0 | 1362 | return rocker_cmd_exec(rocker_port, false, |
1363 | rocker_cmd_get_port_settings_prep, NULL, |
1364 | rocker_cmd_get_port_settings_macaddr_proc, | |
c4f20321 | 1365 | macaddr); |
1366 | } |
1367 | ||
1368 | static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port, |
1369 | u8 *p_mode) | |
1370 | { | |
53901cc0 | 1371 | return rocker_cmd_exec(rocker_port, false, |
1372 | rocker_cmd_get_port_settings_prep, NULL, |
1373 | rocker_cmd_get_port_settings_mode_proc, p_mode); | |
1374 | } | |
1375 | ||
1376 | static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, |
1377 | struct ethtool_cmd *ecmd) | |
1378 | { | |
53901cc0 | 1379 | return rocker_cmd_exec(rocker_port, false, |
4b8ac966 | 1380 | rocker_cmd_set_port_settings_ethtool_prep, |
c4f20321 | 1381 | ecmd, NULL, NULL); |
1382 | } |
1383 | ||
1384 | static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, | |
1385 | unsigned char *macaddr) | |
1386 | { | |
53901cc0 | 1387 | return rocker_cmd_exec(rocker_port, false, |
4b8ac966 | 1388 | rocker_cmd_set_port_settings_macaddr_prep, |
c4f20321 | 1389 | macaddr, NULL, NULL); |
1390 | } |
1391 | ||
1392 | static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port, |
1393 | int mtu) | |
1394 | { | |
53901cc0 | 1395 | return rocker_cmd_exec(rocker_port, false, |
1396 | rocker_cmd_set_port_settings_mtu_prep, |
1397 | &mtu, NULL, NULL); | |
1398 | } | |
1399 | ||
1400 | int rocker_port_set_learning(struct rocker_port *rocker_port, |
1401 | bool learning) | |
5111f80c | 1402 | { |
53901cc0 | 1403 | return rocker_cmd_exec(rocker_port, false, |
5111f80c | 1404 | rocker_cmd_set_port_learning_prep, |
c1fe922e | 1405 | &learning, NULL, NULL); |
1406 | } |
1407 | ||
1408 | /********************** |
1409 | * Worlds manipulation | |
1410 | **********************/ | |
1411 | ||
1412 | static struct rocker_world_ops *rocker_world_ops[] = { | |
1413 | &rocker_ofdpa_ops, | |
1414 | }; | |
1415 | ||
1416 | #define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops) | |
1417 | ||
1418 | static struct rocker_world_ops *rocker_world_ops_find(u8 mode) | |
1419 | { | |
1420 | int i; | |
1421 | ||
1422 | for (i = 0; i < ROCKER_WORLD_OPS_LEN; i++) | |
1423 | if (rocker_world_ops[i]->mode == mode) | |
1424 | return rocker_world_ops[i]; | |
1425 | return NULL; | |
1426 | } | |
1427 | ||
1428 | static int rocker_world_init(struct rocker *rocker, u8 mode) | |
1429 | { | |
1430 | struct rocker_world_ops *wops; | |
1431 | int err; | |
1432 | ||
1433 | wops = rocker_world_ops_find(mode); | |
1434 | if (!wops) { | |
1435 | dev_err(&rocker->pdev->dev, "port mode \"%d\" is not supported\n", | |
1436 | mode); | |
1437 | return -EINVAL; | |
1438 | } | |
1439 | rocker->wops = wops; | |
1440 | rocker->wpriv = kzalloc(wops->priv_size, GFP_KERNEL); | |
1441 | if (!rocker->wpriv) | |
1442 | return -ENOMEM; | |
1443 | if (!wops->init) | |
1444 | return 0; | |
1445 | err = wops->init(rocker); | |
1446 | if (err) | |
1447 | kfree(rocker->wpriv); | |
1448 | return err; | |
1449 | } | |
1450 | ||
1451 | static void rocker_world_fini(struct rocker *rocker) | |
1452 | { | |
1453 | struct rocker_world_ops *wops = rocker->wops; | |
1454 | ||
1455 | if (!wops || !wops->fini) | |
1456 | return; | |
1457 | wops->fini(rocker); | |
1458 | kfree(rocker->wpriv); | |
1459 | } | |
1460 | ||
1461 | static int rocker_world_check_init(struct rocker_port *rocker_port) | |
1462 | { | |
1463 | struct rocker *rocker = rocker_port->rocker; | |
1464 | u8 mode; | |
1465 | int err; | |
1466 | ||
1467 | err = rocker_cmd_get_port_settings_mode(rocker_port, &mode); | |
1468 | if (err) { | |
1469 | dev_err(&rocker->pdev->dev, "failed to get port mode\n"); | |
1470 | return err; | |
1471 | } | |
1472 | if (rocker->wops) { | |
1473 | if (rocker->wops->mode != mode) { | |
1474 | dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n"); | |
92d230dd | 1475 | return -EINVAL; |
1476 | } |
1477 | return 0; | |
1478 | } | |
1479 | return rocker_world_init(rocker, mode); | |
1480 | } | |
1481 | ||
1482 | static int rocker_world_port_pre_init(struct rocker_port *rocker_port) | |
1483 | { | |
1484 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1485 | int err; | |
1486 | ||
1487 | rocker_port->wpriv = kzalloc(wops->port_priv_size, GFP_KERNEL); | |
1488 | if (!rocker_port->wpriv) | |
1489 | return -ENOMEM; | |
1490 | if (!wops->port_pre_init) | |
1491 | return 0; | |
1492 | err = wops->port_pre_init(rocker_port); | |
1493 | if (err) | |
1494 | kfree(rocker_port->wpriv); | |
1495 | return err; | |
1496 | } | |
1497 | ||
1498 | static int rocker_world_port_init(struct rocker_port *rocker_port) | |
1499 | { | |
1500 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1501 | ||
1502 | if (!wops->port_init) | |
1503 | return 0; | |
1504 | return wops->port_init(rocker_port); | |
1505 | } | |
1506 | ||
1507 | static void rocker_world_port_fini(struct rocker_port *rocker_port) | |
1508 | { | |
1509 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1510 | ||
1511 | if (!wops->port_fini) | |
1512 | return; | |
1513 | wops->port_fini(rocker_port); | |
1514 | } | |
1515 | ||
1516 | static void rocker_world_port_post_fini(struct rocker_port *rocker_port) | |
1517 | { | |
1518 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1519 | ||
1520 | if (!wops->port_post_fini) | |
1521 | return; | |
1522 | wops->port_post_fini(rocker_port); | |
1523 | kfree(rocker_port->wpriv); | |
1524 | } | |
1525 | ||
1526 | static int rocker_world_port_open(struct rocker_port *rocker_port) | |
1527 | { | |
1528 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1529 | ||
1530 | if (!wops->port_open) | |
1531 | return 0; | |
1532 | return wops->port_open(rocker_port); | |
1533 | } | |
1534 | ||
1535 | static void rocker_world_port_stop(struct rocker_port *rocker_port) | |
1536 | { | |
1537 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1538 | ||
1539 | if (!wops->port_stop) | |
1540 | return; | |
1541 | wops->port_stop(rocker_port); | |
1542 | } | |
1543 | ||
1544 | static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port, | |
1545 | u8 state, | |
1546 | struct switchdev_trans *trans) | |
1547 | { | |
1548 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1549 | ||
1550 | if (!wops->port_attr_stp_state_set) | |
fccd84d4 | 1551 | return -EOPNOTSUPP; |
e420114e JP |
1552 | return wops->port_attr_stp_state_set(rocker_port, state, trans); |
1553 | } | |
1554 | ||
1555 | static int | |
1556 | rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port, | |
1557 | unsigned long brport_flags, | |
1558 | struct switchdev_trans *trans) | |
1559 | { | |
1560 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1561 | ||
1562 | if (!wops->port_attr_bridge_flags_set) | |
fccd84d4 | 1563 | return -EOPNOTSUPP; |
e420114e JP |
1564 | return wops->port_attr_bridge_flags_set(rocker_port, brport_flags, |
1565 | trans); | |
1566 | } | |
1567 | ||
1568 | static int | |
1569 | rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port, | |
1570 | unsigned long *p_brport_flags) | |
1571 | { | |
1572 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1573 | ||
1574 | if (!wops->port_attr_bridge_flags_get) | |
fccd84d4 | 1575 | return -EOPNOTSUPP; |
e420114e JP |
1576 | return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags); |
1577 | } | |
1578 | ||
1579 | static int | |
1580 | rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port, | |
1581 | u32 ageing_time, | |
1582 | struct switchdev_trans *trans) | |
1583 | ||
1584 | { | |
1585 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1586 | ||
1587 | if (!wops->port_attr_bridge_ageing_time_set) | |
fccd84d4 | 1588 | return -EOPNOTSUPP; |
e420114e JP |
1589 | return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time, |
1590 | trans); | |
1591 | } | |
1592 | ||
1593 | static int | |
1594 | rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port, | |
1595 | const struct switchdev_obj_port_vlan *vlan, | |
1596 | struct switchdev_trans *trans) | |
1597 | { | |
1598 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1599 | ||
1600 | if (!wops->port_obj_vlan_add) | |
fccd84d4 | 1601 | return -EOPNOTSUPP; |
e420114e JP |
1602 | return wops->port_obj_vlan_add(rocker_port, vlan, trans); |
1603 | } | |
1604 | ||
1605 | static int | |
1606 | rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port, | |
1607 | const struct switchdev_obj_port_vlan *vlan) | |
1608 | { | |
1609 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1610 | ||
1611 | if (!wops->port_obj_vlan_del) | |
fccd84d4 | 1612 | return -EOPNOTSUPP; |
e420114e JP |
1613 | return wops->port_obj_vlan_del(rocker_port, vlan); |
1614 | } | |
1615 | ||
1616 | static int | |
1617 | rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port, | |
1618 | struct switchdev_obj_port_vlan *vlan, | |
1619 | switchdev_obj_dump_cb_t *cb) | |
1620 | { | |
1621 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1622 | ||
1623 | if (!wops->port_obj_vlan_dump) | |
fccd84d4 | 1624 | return -EOPNOTSUPP; |
e420114e JP |
1625 | return wops->port_obj_vlan_dump(rocker_port, vlan, cb); |
1626 | } | |
1627 | ||
e420114e JP |
1628 | static int |
1629 | rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port, | |
1630 | const struct switchdev_obj_port_fdb *fdb, | |
1631 | struct switchdev_trans *trans) | |
1632 | { | |
1633 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1634 | ||
1635 | if (!wops->port_obj_fdb_add) | |
fccd84d4 | 1636 | return -EOPNOTSUPP; |
e420114e JP |
1637 | return wops->port_obj_fdb_add(rocker_port, fdb, trans); |
1638 | } | |
1639 | ||
1640 | static int | |
1641 | rocker_world_port_obj_fdb_del(struct rocker_port *rocker_port, | |
1642 | const struct switchdev_obj_port_fdb *fdb) | |
1643 | { | |
1644 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1645 | ||
1646 | if (!wops->port_obj_fdb_del) | |
fccd84d4 | 1647 | return -EOPNOTSUPP; |
e420114e JP |
1648 | return wops->port_obj_fdb_del(rocker_port, fdb); |
1649 | } | |
1650 | ||
1651 | static int | |
1652 | rocker_world_port_obj_fdb_dump(const struct rocker_port *rocker_port, | |
1653 | struct switchdev_obj_port_fdb *fdb, | |
1654 | switchdev_obj_dump_cb_t *cb) | |
1655 | { | |
1656 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1657 | ||
1658 | if (!wops->port_obj_fdb_dump) | |
fccd84d4 | 1659 | return -EOPNOTSUPP; |
e420114e JP |
1660 | return wops->port_obj_fdb_dump(rocker_port, fdb, cb); |
1661 | } | |
1662 | ||
1663 | static int rocker_world_port_master_linked(struct rocker_port *rocker_port, | |
1664 | struct net_device *master) | |
1665 | { | |
1666 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1667 | ||
1668 | if (!wops->port_master_linked) | |
fccd84d4 | 1669 | return -EOPNOTSUPP; |
e420114e JP |
1670 | return wops->port_master_linked(rocker_port, master); |
1671 | } | |
1672 | ||
1673 | static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port, | |
1674 | struct net_device *master) | |
1675 | { | |
1676 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1677 | ||
1678 | if (!wops->port_master_unlinked) | |
fccd84d4 | 1679 | return -EOPNOTSUPP; |
e420114e JP |
1680 | return wops->port_master_unlinked(rocker_port, master); |
1681 | } | |
1682 | ||
1683 | static int rocker_world_port_neigh_update(struct rocker_port *rocker_port, | |
1684 | struct neighbour *n) | |
1685 | { | |
1686 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1687 | ||
1688 | if (!wops->port_neigh_update) | |
fccd84d4 | 1689 | return -EOPNOTSUPP; |
e420114e JP |
1690 | return wops->port_neigh_update(rocker_port, n); |
1691 | } | |
1692 | ||
1693 | static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port, | |
1694 | struct neighbour *n) | |
1695 | { | |
1696 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1697 | ||
1698 | if (!wops->port_neigh_destroy) | |
fccd84d4 | 1699 | return -EOPNOTSUPP; |
e420114e JP |
1700 | return wops->port_neigh_destroy(rocker_port, n); |
1701 | } | |
1702 | ||
1703 | static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port, | |
1704 | const unsigned char *addr, | |
1705 | __be16 vlan_id) | |
1706 | { | |
1707 | struct rocker_world_ops *wops = rocker_port->rocker->wops; | |
1708 | ||
1709 | if (!wops->port_ev_mac_vlan_seen) | |
fccd84d4 | 1710 | return -EOPNOTSUPP; |
e420114e JP |
1711 | return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id); |
1712 | } | |
1713 | ||
936bd486 JP |
1714 | static int rocker_world_fib4_add(struct rocker *rocker, |
1715 | const struct fib_entry_notifier_info *fen_info) | |
1716 | { | |
1717 | struct rocker_world_ops *wops = rocker->wops; | |
1718 | ||
1719 | if (!wops->fib4_add) | |
1720 | return 0; | |
1721 | return wops->fib4_add(rocker, fen_info); | |
1722 | } | |
1723 | ||
1724 | static int rocker_world_fib4_del(struct rocker *rocker, | |
1725 | const struct fib_entry_notifier_info *fen_info) | |
1726 | { | |
1727 | struct rocker_world_ops *wops = rocker->wops; | |
1728 | ||
1729 | if (!wops->fib4_del) | |
1730 | return 0; | |
1731 | return wops->fib4_del(rocker, fen_info); | |
1732 | } | |
1733 | ||
1734 | static void rocker_world_fib4_abort(struct rocker *rocker) | |
1735 | { | |
1736 | struct rocker_world_ops *wops = rocker->wops; | |
1737 | ||
1738 | if (wops->fib4_abort) | |
1739 | wops->fib4_abort(rocker); | |
1740 | } | |
1741 | ||
3fbcdbf3 JP |
1742 | /***************** |
1743 | * Net device ops | |
1744 | *****************/ | |
1745 | ||
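| /* Bring-up order: DMA rings, per-port tx/rx MSI-X vectors, world | |
| * open, NAPI, then enable the port unless it is held down via | |
| * proto_down. | |
| */ | |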
1746 | static int rocker_port_open(struct net_device *dev) | |
9f6bbf7c | 1747 | { |
3fbcdbf3 JP |
1748 | struct rocker_port *rocker_port = netdev_priv(dev); |
1749 | int err; | |
9f6bbf7c | 1750 | |
3fbcdbf3 JP |
1751 | err = rocker_port_dma_rings_init(rocker_port); |
1752 | if (err) | |
1753 | return err; | |
9f6bbf7c | 1754 | |
3fbcdbf3 JP |
1755 | err = request_irq(rocker_msix_tx_vector(rocker_port), |
1756 | rocker_tx_irq_handler, 0, | |
1757 | rocker_driver_name, rocker_port); | |
1758 | if (err) { | |
1759 | netdev_err(rocker_port->dev, "cannot assign tx irq\n"); | |
1760 | goto err_request_tx_irq; | |
1761 | } | |
9f6bbf7c | 1762 | |
3fbcdbf3 JP |
1763 | err = request_irq(rocker_msix_rx_vector(rocker_port), |
1764 | rocker_rx_irq_handler, 0, | |
1765 | rocker_driver_name, rocker_port); | |
1766 | if (err) { | |
1767 | netdev_err(rocker_port->dev, "cannot assign rx irq\n"); | |
1768 | goto err_request_rx_irq; | |
1769 | } | |
9f6bbf7c | 1770 | |
3fbcdbf3 JP |
1771 | err = rocker_world_port_open(rocker_port); |
1772 | if (err) { | |
1773 | netdev_err(rocker_port->dev, "cannot open port in world\n"); | |
1774 | goto err_world_port_open; | |
1775 | } | |
9f6bbf7c | 1776 | |
3fbcdbf3 JP |
1777 | napi_enable(&rocker_port->napi_tx); |
1778 | napi_enable(&rocker_port->napi_rx); | |
1779 | if (!dev->proto_down) | |
1780 | rocker_port_set_enable(rocker_port, true); | |
1781 | netif_start_queue(dev); | |
9f6bbf7c | 1782 | return 0; |
3fbcdbf3 JP |
1783 | |
1784 | err_world_port_open: | |
1785 | free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); | |
1786 | err_request_rx_irq: | |
1787 | free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); | |
1788 | err_request_tx_irq: | |
1789 | rocker_port_dma_rings_fini(rocker_port); | |
1790 | return err; | |
9f6bbf7c SF |
1791 | } |
1792 | ||
3fbcdbf3 | 1793 | static int rocker_port_stop(struct net_device *dev) |
9f6bbf7c | 1794 | { |
3fbcdbf3 JP |
1795 | struct rocker_port *rocker_port = netdev_priv(dev); |
1796 | ||
1797 | netif_stop_queue(dev); | |
1798 | rocker_port_set_enable(rocker_port, false); | |
1799 | napi_disable(&rocker_port->napi_rx); | |
1800 | napi_disable(&rocker_port->napi_tx); | |
1801 | rocker_world_port_stop(rocker_port); | |
1802 | free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); | |
1803 | free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); | |
1804 | rocker_port_dma_rings_fini(rocker_port); | |
9f6bbf7c SF |
1805 | |
1806 | return 0; | |
1807 | } | |
1808 | ||
3fbcdbf3 JP |
1809 | static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port, |
1810 | const struct rocker_desc_info *desc_info) | |
9f6bbf7c | 1811 | { |
3fbcdbf3 JP |
1812 | const struct rocker *rocker = rocker_port->rocker; |
1813 | struct pci_dev *pdev = rocker->pdev; | |
1814 | const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1]; | |
1815 | struct rocker_tlv *attr; | |
1816 | int rem; | |
9f6bbf7c | 1817 | |
3fbcdbf3 JP |
1818 | rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info); |
1819 | if (!attrs[ROCKER_TLV_TX_FRAGS]) | |
1820 | return; | |
1821 | rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) { | |
1822 | const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1]; | |
1823 | dma_addr_t dma_handle; | |
1824 | size_t len; | |
1825 | ||
1826 | if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG) | |
1827 | continue; | |
1828 | rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX, | |
1829 | attr); | |
1830 | if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] || | |
1831 | !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) | |
1832 | continue; | |
1833 | dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]); | |
1834 | len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]); | |
1835 | pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE); | |
1836 | } | |
9f6bbf7c SF |
1837 | } |
1838 | ||
3fbcdbf3 JP |
1839 | static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port, |
1840 | struct rocker_desc_info *desc_info, | |
1841 | char *buf, size_t buf_len) | |
9f6bbf7c | 1842 | { |
3fbcdbf3 JP |
1843 | const struct rocker *rocker = rocker_port->rocker; |
1844 | struct pci_dev *pdev = rocker->pdev; | |
1845 | dma_addr_t dma_handle; | |
1846 | struct rocker_tlv *frag; | |
9f6bbf7c | 1847 | |
3fbcdbf3 JP |
1848 | dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE); |
1849 | if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) { | |
1850 | if (net_ratelimit()) | |
1851 | netdev_err(rocker_port->dev, "failed to dma map tx frag\n"); | |
1852 | return -EIO; | |
9f6bbf7c | 1853 | } |
3fbcdbf3 JP |
1854 | frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG); |
1855 | if (!frag) | |
1856 | goto unmap_frag; | |
1857 | if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR, | |
1858 | dma_handle)) | |
1859 | goto nest_cancel; | |
1860 | if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN, | |
1861 | buf_len)) | |
1862 | goto nest_cancel; | |
1863 | rocker_tlv_nest_end(desc_info, frag); | |
9f6bbf7c | 1864 | return 0; |
3fbcdbf3 JP |
1865 | |
1866 | nest_cancel: | |
1867 | rocker_tlv_nest_cancel(desc_info, frag); | |
1868 | unmap_frag: | |
1869 | pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE); | |
1870 | return -EMSGSIZE; | |
9f6bbf7c SF |
1871 | } |
1872 | ||
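| /* The tx path describes each packet to the device as a TLV nest of | |
| * DMA-mapped fragments; skbs with more than ROCKER_TX_FRAGS_MAX | |
| * fragments are linearized first. | |
| */ | |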
3fbcdbf3 | 1873 | static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev) |
9f6bbf7c | 1874 | { |
3fbcdbf3 JP |
1875 | struct rocker_port *rocker_port = netdev_priv(dev); |
1876 | struct rocker *rocker = rocker_port->rocker; | |
1877 | struct rocker_desc_info *desc_info; | |
1878 | struct rocker_tlv *frags; | |
1879 | int i; | |
1880 | int err; | |
4b8ac966 JP |
1881 | |
1882 | desc_info = rocker_desc_head_get(&rocker_port->tx_ring); | |
1883 | if (unlikely(!desc_info)) { | |
1884 | if (net_ratelimit()) | |
1885 | netdev_err(dev, "tx ring full when queue awake\n"); | |
1886 | return NETDEV_TX_BUSY; | |
1887 | } | |
1888 | ||
1889 | rocker_desc_cookie_ptr_set(desc_info, skb); | |
1890 | ||
1891 | frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS); | |
1892 | if (!frags) | |
1893 | goto out; | |
1894 | err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, | |
1895 | skb->data, skb_headlen(skb)); | |
1896 | if (err) | |
1897 | goto nest_cancel; | |
95b9be64 JP |
1898 | if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) { |
1899 | err = skb_linearize(skb); | |
1900 | if (err) | |
1901 | goto unmap_frags; | |
1902 | } | |
4b8ac966 JP |
1903 | |
1904 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
1905 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
1906 | ||
1907 | err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, | |
1908 | skb_frag_address(frag), | |
1909 | skb_frag_size(frag)); | |
1910 | if (err) | |
1911 | goto unmap_frags; | |
1912 | } | |
1913 | rocker_tlv_nest_end(desc_info, frags); | |
1914 | ||
1915 | rocker_desc_gen_clear(desc_info); | |
1916 | rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info); | |
1917 | ||
1918 | desc_info = rocker_desc_head_get(&rocker_port->tx_ring); | |
1919 | if (!desc_info) | |
1920 | netif_stop_queue(dev); | |
1921 | ||
1922 | return NETDEV_TX_OK; | |
1923 | ||
1924 | unmap_frags: | |
1925 | rocker_tx_desc_frags_unmap(rocker_port, desc_info); | |
1926 | nest_cancel: | |
1927 | rocker_tlv_nest_cancel(desc_info, frags); | |
1928 | out: | |
1929 | dev_kfree_skb(skb); | |
f2bbca51 DA |
1930 | dev->stats.tx_dropped++; |
1931 | ||
4b8ac966 JP |
1932 | return NETDEV_TX_OK; |
1933 | } | |
1934 | ||
1935 | static int rocker_port_set_mac_address(struct net_device *dev, void *p) | |
1936 | { | |
1937 | struct sockaddr *addr = p; | |
1938 | struct rocker_port *rocker_port = netdev_priv(dev); | |
1939 | int err; | |
1940 | ||
1941 | if (!is_valid_ether_addr(addr->sa_data)) | |
1942 | return -EADDRNOTAVAIL; | |
1943 | ||
1944 | err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data); | |
1945 | if (err) | |
1946 | return err; | |
1947 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | |
1948 | return 0; | |
1949 | } | |
1950 | ||
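| /* The port must be quiesced while the MTU changes: stop it, push the | |
| * new MTU to the device, then reopen if it was running. | |
| */ | |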
77a58c74 SF |
1951 | static int rocker_port_change_mtu(struct net_device *dev, int new_mtu) |
1952 | { | |
1953 | struct rocker_port *rocker_port = netdev_priv(dev); | |
1954 | int running = netif_running(dev); | |
1955 | int err; | |
1956 | ||
77a58c74 SF |
1957 | if (running) |
1958 | rocker_port_stop(dev); | |
1959 | ||
1960 | netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu); | |
1961 | dev->mtu = new_mtu; | |
1962 | ||
1963 | err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu); | |
1964 | if (err) | |
1965 | return err; | |
1966 | ||
1967 | if (running) | |
1968 | err = rocker_port_open(dev); | |
1969 | ||
1970 | return err; | |
1971 | } | |
1972 | ||
db19170b DA |
1973 | static int rocker_port_get_phys_port_name(struct net_device *dev, |
1974 | char *buf, size_t len) | |
1975 | { | |
1976 | struct rocker_port *rocker_port = netdev_priv(dev); | |
1977 | struct port_name name = { .buf = buf, .len = len }; | |
1978 | int err; | |
1979 | ||
53901cc0 | 1980 | err = rocker_cmd_exec(rocker_port, false, |
db19170b DA |
1981 | rocker_cmd_get_port_settings_prep, NULL, |
1982 | rocker_cmd_get_port_settings_phys_name_proc, | |
c4f20321 | 1983 | &name); |
db19170b DA |
1984 | |
1985 | return err ? -EOPNOTSUPP : 0; | |
1986 | } | |
1987 | ||
c3055246 AK |
1988 | static int rocker_port_change_proto_down(struct net_device *dev, |
1989 | bool proto_down) | |
1990 | { | |
1991 | struct rocker_port *rocker_port = netdev_priv(dev); | |
1992 | ||
1993 | if (rocker_port->dev->flags & IFF_UP) | |
1994 | rocker_port_set_enable(rocker_port, !proto_down); | |
1995 | rocker_port->dev->proto_down = proto_down; | |
1996 | return 0; | |
1997 | } | |
1998 | ||
503eebc2 JP |
1999 | static void rocker_port_neigh_destroy(struct net_device *dev, |
2000 | struct neighbour *n) | |
dd19f83d SF |
2001 | { |
2002 | struct rocker_port *rocker_port = netdev_priv(n->dev); | |
e420114e | 2003 | int err; |
dd19f83d | 2004 | |
e420114e JP |
2005 | err = rocker_world_port_neigh_destroy(rocker_port, n); |
2006 | if (err) | |
2007 | netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n", | |
2008 | err); | |
dd19f83d SF |
2009 | } |
2010 | ||
98237d43 SF |
2011 | static const struct net_device_ops rocker_port_netdev_ops = { |
2012 | .ndo_open = rocker_port_open, | |
2013 | .ndo_stop = rocker_port_stop, | |
2014 | .ndo_start_xmit = rocker_port_xmit, | |
2015 | .ndo_set_mac_address = rocker_port_set_mac_address, | |
77a58c74 | 2016 | .ndo_change_mtu = rocker_port_change_mtu, |
85fdb956 | 2017 | .ndo_bridge_getlink = switchdev_port_bridge_getlink, |
fc8f40d8 | 2018 | .ndo_bridge_setlink = switchdev_port_bridge_setlink, |
54ba5a0b | 2019 | .ndo_bridge_dellink = switchdev_port_bridge_dellink, |
45d4122c SS |
2020 | .ndo_fdb_add = switchdev_port_fdb_add, |
2021 | .ndo_fdb_del = switchdev_port_fdb_del, | |
2022 | .ndo_fdb_dump = switchdev_port_fdb_dump, | |
db19170b | 2023 | .ndo_get_phys_port_name = rocker_port_get_phys_port_name, |
c3055246 | 2024 | .ndo_change_proto_down = rocker_port_change_proto_down, |
dd19f83d | 2025 | .ndo_neigh_destroy = rocker_port_neigh_destroy, |
98237d43 SF |
2026 | }; |
2027 | ||
2028 | /******************** | |
2029 | * swdev interface | |
2030 | ********************/ | |
2031 | ||
f8e20a9f SF |
2032 | static int rocker_port_attr_get(struct net_device *dev, |
2033 | struct switchdev_attr *attr) | |
4b8ac966 | 2034 | { |
e5054643 SH |
2035 | const struct rocker_port *rocker_port = netdev_priv(dev); |
2036 | const struct rocker *rocker = rocker_port->rocker; | |
e420114e | 2037 | int err = 0; |
4b8ac966 | 2038 | |
f8e20a9f | 2039 | switch (attr->id) { |
1f868398 | 2040 | case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: |
42275bd8 SF |
2041 | attr->u.ppid.id_len = sizeof(rocker->hw.id); |
2042 | memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len); | |
f8e20a9f | 2043 | break; |
1f868398 | 2044 | case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: |
e420114e JP |
2045 | err = rocker_world_port_attr_bridge_flags_get(rocker_port, |
2046 | &attr->u.brport_flags); | |
6004c867 | 2047 | break; |
f8e20a9f SF |
2048 | default: |
2049 | return -EOPNOTSUPP; | |
2050 | } | |
2051 | ||
e420114e | 2052 | return err; |
4b8ac966 JP |
2053 | } |
2054 | ||
c4f20321 | 2055 | static int rocker_port_attr_set(struct net_device *dev, |
f7fadf30 | 2056 | const struct switchdev_attr *attr, |
7ea6eb3f | 2057 | struct switchdev_trans *trans) |
c4f20321 SF |
2058 | { |
2059 | struct rocker_port *rocker_port = netdev_priv(dev); | |
2060 | int err = 0; | |
2061 | ||
c4f20321 | 2062 | switch (attr->id) { |
1f868398 | 2063 | case SWITCHDEV_ATTR_ID_PORT_STP_STATE: |
e420114e JP |
2064 | err = rocker_world_port_attr_stp_state_set(rocker_port, |
2065 | attr->u.stp_state, | |
2066 | trans); | |
35636062 | 2067 | break; |
1f868398 | 2068 | case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: |
e420114e JP |
2069 | err = rocker_world_port_attr_bridge_flags_set(rocker_port, |
2070 | attr->u.brport_flags, | |
2071 | trans); | |
6004c867 | 2072 | break; |
d0cf57f9 | 2073 | case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: |
e420114e JP |
2074 | err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port, |
2075 | attr->u.ageing_time, | |
2076 | trans); | |
d0cf57f9 | 2077 | break; |
c4f20321 SF |
2078 | default: |
2079 | err = -EOPNOTSUPP; | |
2080 | break; | |
2081 | } | |
2082 | ||
2083 | return err; | |
6c707945 SF |
2084 | } |
2085 | ||
9228ad26 | 2086 | static int rocker_port_obj_add(struct net_device *dev, |
648b4a99 | 2087 | const struct switchdev_obj *obj, |
7ea6eb3f | 2088 | struct switchdev_trans *trans) |
9228ad26 SF |
2089 | { |
2090 | struct rocker_port *rocker_port = netdev_priv(dev); | |
2091 | int err = 0; | |
2092 | ||
9e8f4a54 | 2093 | switch (obj->id) { |
57d80838 | 2094 | case SWITCHDEV_OBJ_ID_PORT_VLAN: |
e420114e JP |
2095 | err = rocker_world_port_obj_vlan_add(rocker_port, |
2096 | SWITCHDEV_OBJ_PORT_VLAN(obj), | |
2097 | trans); | |
9228ad26 | 2098 | break; |
57d80838 | 2099 | case SWITCHDEV_OBJ_ID_PORT_FDB: |
e420114e JP |
2100 | err = rocker_world_port_obj_fdb_add(rocker_port, |
2101 | SWITCHDEV_OBJ_PORT_FDB(obj), | |
2102 | trans); | |
45d4122c | 2103 | break; |
9228ad26 SF |
2104 | default: |
2105 | err = -EOPNOTSUPP; | |
2106 | break; | |
2107 | } | |
2108 | ||
2109 | return err; | |
2110 | } | |
2111 | ||
9228ad26 | 2112 | static int rocker_port_obj_del(struct net_device *dev, |
648b4a99 | 2113 | const struct switchdev_obj *obj) |
9228ad26 SF |
2114 | { |
2115 | struct rocker_port *rocker_port = netdev_priv(dev); | |
2116 | int err = 0; | |
2117 | ||
9e8f4a54 | 2118 | switch (obj->id) { |
57d80838 | 2119 | case SWITCHDEV_OBJ_ID_PORT_VLAN: |
e420114e JP |
2120 | err = rocker_world_port_obj_vlan_del(rocker_port, |
2121 | SWITCHDEV_OBJ_PORT_VLAN(obj)); | |
9228ad26 | 2122 | break; |
57d80838 | 2123 | case SWITCHDEV_OBJ_ID_PORT_FDB: |
e420114e JP |
2124 | err = rocker_world_port_obj_fdb_del(rocker_port, |
2125 | SWITCHDEV_OBJ_PORT_FDB(obj)); | |
45d4122c SS |
2126 | break; |
2127 | default: | |
2128 | err = -EOPNOTSUPP; | |
2129 | break; | |
2130 | } | |
2131 | ||
2132 | return err; | |
2133 | } | |
2134 | ||
45d4122c | 2135 | static int rocker_port_obj_dump(struct net_device *dev, |
648b4a99 JP |
2136 | struct switchdev_obj *obj, |
2137 | switchdev_obj_dump_cb_t *cb) | |
45d4122c | 2138 | { |
e5054643 | 2139 | const struct rocker_port *rocker_port = netdev_priv(dev); |
45d4122c SS |
2140 | int err = 0; |
2141 | ||
9e8f4a54 | 2142 | switch (obj->id) { |
57d80838 | 2143 | case SWITCHDEV_OBJ_ID_PORT_FDB: |
e420114e JP |
2144 | err = rocker_world_port_obj_fdb_dump(rocker_port, |
2145 | SWITCHDEV_OBJ_PORT_FDB(obj), | |
2146 | cb); | |
45d4122c | 2147 | break; |
57d80838 | 2148 | case SWITCHDEV_OBJ_ID_PORT_VLAN: |
e420114e JP |
2149 | err = rocker_world_port_obj_vlan_dump(rocker_port, |
2150 | SWITCHDEV_OBJ_PORT_VLAN(obj), | |
2151 | cb); | |
7d4f8d87 | 2152 | break; |
9228ad26 SF |
2153 | default: |
2154 | err = -EOPNOTSUPP; | |
2155 | break; | |
2156 | } | |
2157 | ||
2158 | return err; | |
2159 | } | |
2160 | ||
9d47c0a2 | 2161 | static const struct switchdev_ops rocker_port_switchdev_ops = { |
f8e20a9f | 2162 | .switchdev_port_attr_get = rocker_port_attr_get, |
35636062 | 2163 | .switchdev_port_attr_set = rocker_port_attr_set, |
9228ad26 SF |
2164 | .switchdev_port_obj_add = rocker_port_obj_add, |
2165 | .switchdev_port_obj_del = rocker_port_obj_del, | |
45d4122c | 2166 | .switchdev_port_obj_dump = rocker_port_obj_dump, |
4b8ac966 JP |
2167 | }; |
2168 | ||
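| /* FIB notifications arrive in atomic context, so each event is copied | |
| * into a work item (holding a reference on the fib_info) and handled | |
| * on the driver's ordered workqueue under rtnl_lock. | |
| */ | |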
db701955 IS |
2169 | struct rocker_fib_event_work { |
2170 | struct work_struct work; | |
2171 | struct fib_entry_notifier_info fen_info; | |
2172 | struct rocker *rocker; | |
2173 | unsigned long event; | |
2174 | }; | |
2175 | ||
2176 | static void rocker_router_fib_event_work(struct work_struct *work) | |
936bd486 | 2177 | { |
db701955 IS |
2178 | struct rocker_fib_event_work *fib_work = |
2179 | container_of(work, struct rocker_fib_event_work, work); | |
2180 | struct rocker *rocker = fib_work->rocker; | |
936bd486 JP |
2181 | int err; |
2182 | ||
db701955 IS |
2183 | /* Protect internal structures from changes */ |
2184 | rtnl_lock(); | |
2185 | switch (fib_work->event) { | |
936bd486 | 2186 | case FIB_EVENT_ENTRY_ADD: |
db701955 | 2187 | err = rocker_world_fib4_add(rocker, &fib_work->fen_info); |
936bd486 JP |
2188 | if (err) |
2189 | rocker_world_fib4_abort(rocker); | |
db701955 | 2190 | fib_info_put(fib_work->fen_info.fi); |
936bd486 JP |
2191 | break; |
2192 | case FIB_EVENT_ENTRY_DEL: | |
db701955 IS |
2193 | rocker_world_fib4_del(rocker, &fib_work->fen_info); |
2194 | fib_info_put(fib_work->fen_info.fi); | |
936bd486 JP |
2195 | break; |
2196 | case FIB_EVENT_RULE_ADD: /* fall through */ | |
2197 | case FIB_EVENT_RULE_DEL: | |
2198 | rocker_world_fib4_abort(rocker); | |
2199 | break; | |
2200 | } | |
db701955 IS |
2201 | rtnl_unlock(); |
2202 | kfree(fib_work); | |
2203 | } | |
2204 | ||
2205 | /* Called with rcu_read_lock() */ | |
2206 | static int rocker_router_fib_event(struct notifier_block *nb, | |
2207 | unsigned long event, void *ptr) | |
2208 | { | |
2209 | struct rocker *rocker = container_of(nb, struct rocker, fib_nb); | |
2210 | struct rocker_fib_event_work *fib_work; | |
2211 | ||
2212 | fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); | |
2213 | if (WARN_ON(!fib_work)) | |
2214 | return NOTIFY_BAD; | |
2215 | ||
2216 | INIT_WORK(&fib_work->work, rocker_router_fib_event_work); | |
2217 | fib_work->rocker = rocker; | |
2218 | fib_work->event = event; | |
2219 | ||
2220 | switch (event) { | |
2221 | case FIB_EVENT_ENTRY_ADD: /* fall through */ | |
2222 | case FIB_EVENT_ENTRY_DEL: | |
2223 | memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info)); | |
2224 | /* Take a reference on fib_info to prevent it from being | |
2225 | * freed while work is queued. Release it afterwards. | |
2226 | */ | |
2227 | fib_info_hold(fib_work->fen_info.fi); | |
2228 | break; | |
2229 | } | |
2230 | ||
2231 | queue_work(rocker->rocker_owq, &fib_work->work); | |
2232 | ||
936bd486 JP |
2233 | return NOTIFY_DONE; |
2234 | } | |
2235 | ||
4b8ac966 JP |
2236 | /******************** |
2237 | * ethtool interface | |
2238 | ********************/ | |
2239 | ||
2240 | static int rocker_port_get_settings(struct net_device *dev, | |
2241 | struct ethtool_cmd *ecmd) | |
2242 | { | |
2243 | struct rocker_port *rocker_port = netdev_priv(dev); | |
2244 | ||
2245 | return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd); | |
2246 | } | |
2247 | ||
2248 | static int rocker_port_set_settings(struct net_device *dev, | |
2249 | struct ethtool_cmd *ecmd) | |
2250 | { | |
2251 | struct rocker_port *rocker_port = netdev_priv(dev); | |
2252 | ||
2253 | return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd); | |
2254 | } | |
2255 | ||
2256 | static void rocker_port_get_drvinfo(struct net_device *dev, | |
2257 | struct ethtool_drvinfo *drvinfo) | |
2258 | { | |
2259 | strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver)); | |
2260 | strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); | |
2261 | } | |
2262 | ||
9766e97a DA |
2263 | static struct rocker_port_stats { |
2264 | char str[ETH_GSTRING_LEN]; | |
2265 | int type; | |
2266 | } rocker_port_stats[] = { | |
2267 | { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, }, | |
2268 | { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, }, | |
2269 | { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, }, | |
2270 | { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, }, | |
2271 | ||
2272 | { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, }, | |
2273 | { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, }, | |
2274 | { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, }, | |
2275 | { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, }, | |
2276 | }; | |
2277 | ||
2278 | #define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats) | |
2279 | ||
2280 | static void rocker_port_get_strings(struct net_device *netdev, u32 stringset, | |
2281 | u8 *data) | |
2282 | { | |
2283 | u8 *p = data; | |
2284 | int i; | |
2285 | ||
2286 | switch (stringset) { | |
2287 | case ETH_SS_STATS: | |
2288 | for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { | |
2289 | memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN); | |
2290 | p += ETH_GSTRING_LEN; | |
2291 | } | |
2292 | break; | |
2293 | } | |
2294 | } | |
2295 | ||
2296 | static int | |
534ba6a8 | 2297 | rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port, |
9766e97a DA |
2298 | struct rocker_desc_info *desc_info, |
2299 | void *priv) | |
2300 | { | |
2301 | struct rocker_tlv *cmd_stats; | |
2302 | ||
2303 | if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, | |
2304 | ROCKER_TLV_CMD_TYPE_GET_PORT_STATS)) | |
2305 | return -EMSGSIZE; | |
2306 | ||
2307 | cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); | |
2308 | if (!cmd_stats) | |
2309 | return -EMSGSIZE; | |
2310 | ||
4a6bb6d3 SF |
2311 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT, |
2312 | rocker_port->pport)) | |
9766e97a DA |
2313 | return -EMSGSIZE; |
2314 | ||
2315 | rocker_tlv_nest_end(desc_info, cmd_stats); | |
2316 | ||
2317 | return 0; | |
2318 | } | |
2319 | ||
2320 | static int | |
534ba6a8 | 2321 | rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port, |
e5054643 | 2322 | const struct rocker_desc_info *desc_info, |
9766e97a DA |
2323 | void *priv) |
2324 | { | |
e5054643 SH |
2325 | const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; |
2326 | const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1]; | |
2327 | const struct rocker_tlv *pattr; | |
4a6bb6d3 | 2328 | u32 pport; |
9766e97a DA |
2329 | u64 *data = priv; |
2330 | int i; | |
2331 | ||
2332 | rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); | |
2333 | ||
2334 | if (!attrs[ROCKER_TLV_CMD_INFO]) | |
2335 | return -EIO; | |
2336 | ||
2337 | rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX, | |
2338 | attrs[ROCKER_TLV_CMD_INFO]); | |
2339 | ||
4a6bb6d3 | 2340 | if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]) |
9766e97a DA |
2341 | return -EIO; |
2342 | ||
4a6bb6d3 SF |
2343 | pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]); |
2344 | if (pport != rocker_port->pport) | |
9766e97a DA |
2345 | return -EIO; |
2346 | ||
2347 | for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { | |
2348 | pattr = stats_attrs[rocker_port_stats[i].type]; | |
2349 | if (!pattr) | |
2350 | continue; | |
2351 | ||
2352 | data[i] = rocker_tlv_get_u64(pattr); | |
2353 | } | |
2354 | ||
2355 | return 0; | |
2356 | } | |
2357 | ||
2358 | static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port, | |
2359 | void *priv) | |
2360 | { | |
53901cc0 | 2361 | return rocker_cmd_exec(rocker_port, false, |
9766e97a DA |
2362 | rocker_cmd_get_port_stats_prep, NULL, |
2363 | rocker_cmd_get_port_stats_ethtool_proc, | |
c4f20321 | 2364 | priv); |
9766e97a DA |
2365 | } |
2366 | ||
2367 | static void rocker_port_get_stats(struct net_device *dev, | |
2368 | struct ethtool_stats *stats, u64 *data) | |
2369 | { | |
2370 | struct rocker_port *rocker_port = netdev_priv(dev); | |
2371 | ||
2372 | if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) { | |
2373 | int i; | |
2374 | ||
2375 | for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i) | |
2376 | data[i] = 0; | |
2377 | } | |
9766e97a DA |
2378 | } |
2379 | ||
2380 | static int rocker_port_get_sset_count(struct net_device *netdev, int sset) | |
2381 | { | |
2382 | switch (sset) { | |
2383 | case ETH_SS_STATS: | |
2384 | return ROCKER_PORT_STATS_LEN; | |
2385 | default: | |
2386 | return -EOPNOTSUPP; | |
2387 | } | |
2388 | } | |
2389 | ||
4b8ac966 JP |
2390 | static const struct ethtool_ops rocker_port_ethtool_ops = { |
2391 | .get_settings = rocker_port_get_settings, | |
2392 | .set_settings = rocker_port_set_settings, | |
2393 | .get_drvinfo = rocker_port_get_drvinfo, | |
2394 | .get_link = ethtool_op_get_link, | |
9766e97a DA |
2395 | .get_strings = rocker_port_get_strings, |
2396 | .get_ethtool_stats = rocker_port_get_stats, | |
2397 | .get_sset_count = rocker_port_get_sset_count, | |
4b8ac966 JP |
2398 | }; |
2399 | ||
2400 | /***************** | |
2401 | * NAPI interface | |
2402 | *****************/ | |
2403 | ||
2404 | static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi) | |
2405 | { | |
2406 | return container_of(napi, struct rocker_port, napi_tx); | |
2407 | } | |
2408 | ||
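| /* Tx completion: reclaim finished descriptors, unmap their fragments, | |
| * update stats, free the skbs and return the credits to the ring. | |
| */ | |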
2409 | static int rocker_port_poll_tx(struct napi_struct *napi, int budget) | |
2410 | { | |
2411 | struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi); | |
e5054643 SH |
2412 | const struct rocker *rocker = rocker_port->rocker; |
2413 | const struct rocker_desc_info *desc_info; | |
4b8ac966 JP |
2414 | u32 credits = 0; |
2415 | int err; | |
2416 | ||
2417 | /* Clean up tx descriptors */ | |
2418 | while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) { | |
f2bbca51 DA |
2419 | struct sk_buff *skb; |
2420 | ||
4b8ac966 JP |
2421 | err = rocker_desc_err(desc_info); |
2422 | if (err && net_ratelimit()) | |
2423 | netdev_err(rocker_port->dev, "tx desc received with err %d\n", | |
2424 | err); | |
2425 | rocker_tx_desc_frags_unmap(rocker_port, desc_info); | |
f2bbca51 DA |
2426 | |
2427 | skb = rocker_desc_cookie_ptr_get(desc_info); | |
2428 | if (err == 0) { | |
2429 | rocker_port->dev->stats.tx_packets++; | |
2430 | rocker_port->dev->stats.tx_bytes += skb->len; | |
4725ceb9 | 2431 | } else { |
f2bbca51 | 2432 | rocker_port->dev->stats.tx_errors++; |
4725ceb9 | 2433 | } |
f2bbca51 DA |
2434 | |
2435 | dev_kfree_skb_any(skb); | |
4b8ac966 JP |
2436 | credits++; |
2437 | } | |
2438 | ||
2439 | if (credits && netif_queue_stopped(rocker_port->dev)) | |
2440 | netif_wake_queue(rocker_port->dev); | |
2441 | ||
2442 | napi_complete(napi); | |
2443 | rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits); | |
2444 | ||
2445 | return 0; | |
2446 | } | |
2447 | ||
e5054643 SH |
2448 | static int rocker_port_rx_proc(const struct rocker *rocker, |
2449 | const struct rocker_port *rocker_port, | |
4b8ac966 JP |
2450 | struct rocker_desc_info *desc_info) |
2451 | { | |
e5054643 | 2452 | const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; |
4b8ac966 JP |
2453 | struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); |
2454 | size_t rx_len; | |
3f98a8e6 | 2455 | u16 rx_flags = 0; |
4b8ac966 JP |
2456 | |
2457 | if (!skb) | |
2458 | return -ENOENT; | |
2459 | ||
2460 | rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); | |
2461 | if (!attrs[ROCKER_TLV_RX_FRAG_LEN]) | |
2462 | return -EINVAL; | |
3f98a8e6 SF |
2463 | if (attrs[ROCKER_TLV_RX_FLAGS]) |
2464 | rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]); | |
4b8ac966 JP |
2465 | |
2466 | rocker_dma_rx_ring_skb_unmap(rocker, attrs); | |
2467 | ||
2468 | rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]); | |
2469 | skb_put(skb, rx_len); | |
2470 | skb->protocol = eth_type_trans(skb, rocker_port->dev); | |
f2bbca51 | 2471 | |
3f98a8e6 | 2472 | if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD) |
6bc506b4 | 2473 | skb->offload_fwd_mark = 1; |
3f98a8e6 | 2474 | |
f2bbca51 DA |
2475 | rocker_port->dev->stats.rx_packets++; |
2476 | rocker_port->dev->stats.rx_bytes += skb->len; | |
2477 | ||
4b8ac966 JP |
2478 | netif_receive_skb(skb); |
2479 | ||
534ba6a8 | 2480 | return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info); |
4b8ac966 JP |
2481 | } |
2482 | ||
2483 | static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi) | |
2484 | { | |
2485 | return container_of(napi, struct rocker_port, napi_rx); | |
2486 | } | |
2487 | ||
2488 | static int rocker_port_poll_rx(struct napi_struct *napi, int budget) | |
2489 | { | |
2490 | struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi); | |
e5054643 | 2491 | const struct rocker *rocker = rocker_port->rocker; |
4b8ac966 JP |
2492 | struct rocker_desc_info *desc_info; |
2493 | u32 credits = 0; | |
2494 | int err; | |
2495 | ||
2496 | /* Process rx descriptors */ | |
2497 | while (credits < budget && | |
2498 | (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) { | |
2499 | err = rocker_desc_err(desc_info); | |
2500 | if (err) { | |
2501 | if (net_ratelimit()) | |
2502 | netdev_err(rocker_port->dev, "rx desc received with err %d\n", | |
2503 | err); | |
2504 | } else { | |
2505 | err = rocker_port_rx_proc(rocker, rocker_port, | |
2506 | desc_info); | |
2507 | if (err && net_ratelimit()) | |
2508 | netdev_err(rocker_port->dev, "rx processing failed with err %d\n", | |
2509 | err); | |
2510 | } | |
f2bbca51 DA |
2511 | if (err) |
2512 | rocker_port->dev->stats.rx_errors++; | |
2513 | ||
4b8ac966 JP |
2514 | rocker_desc_gen_clear(desc_info); |
2515 | rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info); | |
2516 | credits++; | |
2517 | } | |
2518 | ||
2519 | if (credits < budget) | |
2520 | napi_complete(napi); | |
2521 | ||
2522 | rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits); | |
2523 | ||
2524 | return credits; | |
2525 | } | |
2526 | ||
2527 | /***************** | |
2528 | * PCI driver ops | |
2529 | *****************/ | |
2530 | ||
e5054643 | 2531 | static void rocker_carrier_init(const struct rocker_port *rocker_port) |
4b8ac966 | 2532 | { |
e5054643 | 2533 | const struct rocker *rocker = rocker_port->rocker; |
4b8ac966 JP |
2534 | u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS); |
2535 | bool link_up; | |
2536 | ||
4a6bb6d3 | 2537 | link_up = link_status & (1 << rocker_port->pport); |
4b8ac966 JP |
2538 | if (link_up) |
2539 | netif_carrier_on(rocker_port->dev); | |
2540 | else | |
2541 | netif_carrier_off(rocker_port->dev); | |
2542 | } | |
2543 | ||
e420114e | 2544 | static void rocker_remove_ports(struct rocker *rocker) |
4b8ac966 | 2545 | { |
9f6bbf7c | 2546 | struct rocker_port *rocker_port; |
4b8ac966 JP |
2547 | int i; |
2548 | ||
9f6bbf7c SF |
2549 | for (i = 0; i < rocker->port_count; i++) { |
2550 | rocker_port = rocker->ports[i]; | |
a0720310 SF |
2551 | if (!rocker_port) |
2552 | continue; | |
e420114e | 2553 | rocker_world_port_fini(rocker_port); |
9f6bbf7c | 2554 | unregister_netdev(rocker_port->dev); |
e420114e | 2555 | rocker_world_port_post_fini(rocker_port); |
1ebd47ef | 2556 | free_netdev(rocker_port->dev); |
9f6bbf7c | 2557 | } |
e420114e | 2558 | rocker_world_fini(rocker); |
4b8ac966 JP |
2559 | kfree(rocker->ports); |
2560 | } | |
2561 | ||
534ba6a8 | 2562 | static void rocker_port_dev_addr_init(struct rocker_port *rocker_port) |
4b8ac966 | 2563 | { |
534ba6a8 | 2564 | const struct rocker *rocker = rocker_port->rocker; |
e5054643 | 2565 | const struct pci_dev *pdev = rocker->pdev; |
4b8ac966 JP |
2566 | int err; |
2567 | ||
2568 | err = rocker_cmd_get_port_settings_macaddr(rocker_port, | |
2569 | rocker_port->dev->dev_addr); | |
2570 | if (err) { | |
2571 | dev_warn(&pdev->dev, "failed to get mac address, using random\n"); | |
2572 | eth_hw_addr_random(rocker_port->dev); | |
2573 | } | |
2574 | } | |
2575 | ||
44770e11 JW |
2576 | #define ROCKER_PORT_MIN_MTU ETH_MIN_MTU |
2577 | #define ROCKER_PORT_MAX_MTU 9000 | |
4b8ac966 JP |
2578 | static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) |
2579 | { | |
97699056 | 2580 | struct pci_dev *pdev = rocker->pdev; |
4b8ac966 JP |
2581 | struct rocker_port *rocker_port; |
2582 | struct net_device *dev; | |
2583 | int err; | |
2584 | ||
2585 | dev = alloc_etherdev(sizeof(struct rocker_port)); | |
2586 | if (!dev) | |
2587 | return -ENOMEM; | |
97699056 | 2588 | SET_NETDEV_DEV(dev, &pdev->dev); |
4b8ac966 JP |
2589 | rocker_port = netdev_priv(dev); |
2590 | rocker_port->dev = dev; | |
2591 | rocker_port->rocker = rocker; | |
2592 | rocker_port->port_number = port_number; | |
4a6bb6d3 | 2593 | rocker_port->pport = port_number + 1; |
4b8ac966 | 2594 | |
e420114e JP |
2595 | err = rocker_world_check_init(rocker_port); |
2596 | if (err) { | |
2597 | dev_err(&pdev->dev, "world init failed\n"); | |
2598 | goto err_world_check_init; | |
2599 | } | |
2600 | ||
534ba6a8 | 2601 | rocker_port_dev_addr_init(rocker_port); |
4b8ac966 JP |
2602 | dev->netdev_ops = &rocker_port_netdev_ops; |
2603 | dev->ethtool_ops = &rocker_port_ethtool_ops; | |
9d47c0a2 | 2604 | dev->switchdev_ops = &rocker_port_switchdev_ops; |
d64b5e85 | 2605 | netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, |
11ce2ba3 | 2606 | NAPI_POLL_WEIGHT); |
4b8ac966 JP |
2607 | netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx, |
2608 | NAPI_POLL_WEIGHT); | |
2609 | rocker_carrier_init(rocker_port); | |
2610 | ||
21518a6e | 2611 | dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG; |
4b8ac966 | 2612 | |
44770e11 JW |
2613 | /* MTU range: 68 - 9000 */ |
2614 | dev->min_mtu = ROCKER_PORT_MIN_MTU; | |
2615 | dev->max_mtu = ROCKER_PORT_MAX_MTU; | |
2616 | ||
e420114e JP |
2617 | err = rocker_world_port_pre_init(rocker_port); |
2618 | if (err) { | |
2619 | dev_err(&pdev->dev, "port world pre-init failed\n"); | |
2620 | goto err_world_port_pre_init; | |
2621 | } | |
4b8ac966 JP |
2622 | err = register_netdev(dev); |
2623 | if (err) { | |
2624 | dev_err(&pdev->dev, "register_netdev failed\n"); | |
2625 | goto err_register_netdev; | |
2626 | } | |
2627 | rocker->ports[port_number] = rocker_port; | |
2628 | ||
e420114e JP |
2629 | err = rocker_world_port_init(rocker_port); |
2630 | if (err) { | |
2631 | dev_err(&pdev->dev, "port world init failed\n"); | |
2632 | goto err_world_port_init; | |
2633 | } | |
2634 | ||
4b8ac966 JP |
2635 | return 0; |
2636 | ||
e420114e | 2637 | err_world_port_init: |
6c4f7780 | 2638 | rocker->ports[port_number] = NULL; |
9f6bbf7c | 2639 | unregister_netdev(dev); |
4b8ac966 | 2640 | err_register_netdev: |
e420114e JP |
2641 | rocker_world_port_post_fini(rocker_port); |
2642 | err_world_port_pre_init: | |
2643 | err_world_check_init: | |
4b8ac966 JP |
2644 | free_netdev(dev); |
2645 | return err; | |
2646 | } | |
2647 | ||
2648 | static int rocker_probe_ports(struct rocker *rocker) | |
2649 | { | |
2650 | int i; | |
2651 | size_t alloc_size; | |
2652 | int err; | |
2653 | ||
2654 | alloc_size = sizeof(struct rocker_port *) * rocker->port_count; | |
27b808cb | 2655 | rocker->ports = kzalloc(alloc_size, GFP_KERNEL); |
e65ad3be DC |
2656 | if (!rocker->ports) |
2657 | return -ENOMEM; | |
4b8ac966 JP |
2658 | for (i = 0; i < rocker->port_count; i++) { |
2659 | err = rocker_probe_port(rocker, i); | |
2660 | if (err) | |
2661 | goto remove_ports; | |
2662 | } | |
2663 | return 0; | |
2664 | ||
2665 | remove_ports: | |
2666 | rocker_remove_ports(rocker); | |
2667 | return err; | |
2668 | } | |
2669 | ||
2670 | static int rocker_msix_init(struct rocker *rocker) | |
2671 | { | |
2672 | struct pci_dev *pdev = rocker->pdev; | |
2673 | int msix_entries; | |
2674 | int i; | |
2675 | int err; | |
2676 | ||
2677 | msix_entries = pci_msix_vec_count(pdev); | |
2678 | if (msix_entries < 0) | |
2679 | return msix_entries; | |
2680 | ||
2681 | if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count)) | |
2682 | return -EINVAL; | |
2683 | ||
2684 | rocker->msix_entries = kmalloc_array(msix_entries, | |
2685 | sizeof(struct msix_entry), | |
2686 | GFP_KERNEL); | |
2687 | if (!rocker->msix_entries) | |
2688 | return -ENOMEM; | |
2689 | ||
2690 | for (i = 0; i < msix_entries; i++) | |
2691 | rocker->msix_entries[i].entry = i; | |
2692 | ||
2693 | err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries); | |
2694 | if (err < 0) | |
2695 | goto err_enable_msix; | |
2696 | ||
2697 | return 0; | |
2698 | ||
2699 | err_enable_msix: | |
2700 | kfree(rocker->msix_entries); | |
2701 | return err; | |
2702 | } | |
2703 | ||
e5054643 | 2704 | static void rocker_msix_fini(const struct rocker *rocker) |
4b8ac966 JP |
2705 | { |
2706 | pci_disable_msix(rocker->pdev); | |
2707 | kfree(rocker->msix_entries); | |
2708 | } | |
2709 | ||
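| /* Device bring-up: enable the PCI device, map BAR0, set up MSI-X, | |
| * run the basic hardware test, reset the switch, initialize the | |
| * cmd/event rings and their IRQs, create the ordered workqueue, | |
| * register the FIB notifier and finally probe the ports. | |
| */ | |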
2710 | static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |
2711 | { | |
2712 | struct rocker *rocker; | |
2713 | int err; | |
2714 | ||
2715 | rocker = kzalloc(sizeof(*rocker), GFP_KERNEL); | |
2716 | if (!rocker) | |
2717 | return -ENOMEM; | |
2718 | ||
2719 | err = pci_enable_device(pdev); | |
2720 | if (err) { | |
2721 | dev_err(&pdev->dev, "pci_enable_device failed\n"); | |
2722 | goto err_pci_enable_device; | |
2723 | } | |
2724 | ||
2725 | err = pci_request_regions(pdev, rocker_driver_name); | |
2726 | if (err) { | |
2727 | dev_err(&pdev->dev, "pci_request_regions failed\n"); | |
2728 | goto err_pci_request_regions; | |
2729 | } | |
2730 | ||
2731 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | |
2732 | if (!err) { | |
2733 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | |
2734 | if (err) { | |
2735 | dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n"); | |
2736 | goto err_pci_set_dma_mask; | |
2737 | } | |
2738 | } else { | |
2739 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | |
2740 | if (err) { | |
2741 | dev_err(&pdev->dev, "pci_set_dma_mask failed\n"); | |
2742 | goto err_pci_set_dma_mask; | |
2743 | } | |
2744 | } | |
2745 | ||
2746 | if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) { | |
2747 | dev_err(&pdev->dev, "invalid PCI region size\n"); | |
3122a92e | 2748 | err = -EINVAL; |
4b8ac966 JP |
2749 | goto err_pci_resource_len_check; |
2750 | } | |
2751 | ||
2752 | rocker->hw_addr = ioremap(pci_resource_start(pdev, 0), | |
2753 | pci_resource_len(pdev, 0)); | |
2754 | if (!rocker->hw_addr) { | |
2755 | dev_err(&pdev->dev, "ioremap failed\n"); | |
2756 | err = -EIO; | |
2757 | goto err_ioremap; | |
2758 | } | |
2759 | pci_set_master(pdev); | |
2760 | ||
2761 | rocker->pdev = pdev; | |
2762 | pci_set_drvdata(pdev, rocker); | |
2763 | ||
2764 | rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT); | |
2765 | ||
2766 | err = rocker_msix_init(rocker); | |
2767 | if (err) { | |
2768 | dev_err(&pdev->dev, "MSI-X init failed\n"); | |
2769 | goto err_msix_init; | |
2770 | } | |
2771 | ||
2772 | err = rocker_basic_hw_test(rocker); | |
2773 | if (err) { | |
2774 | dev_err(&pdev->dev, "basic hw test failed\n"); | |
2775 | goto err_basic_hw_test; | |
2776 | } | |
2777 | ||
2778 | rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); | |
2779 | ||
2780 | err = rocker_dma_rings_init(rocker); | |
2781 | if (err) | |
2782 | goto err_dma_rings_init; | |
2783 | ||
2784 | err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), | |
2785 | rocker_cmd_irq_handler, 0, | |
2786 | rocker_driver_name, rocker); | |
2787 | if (err) { | |
2788 | dev_err(&pdev->dev, "cannot assign cmd irq\n"); | |
2789 | goto err_request_cmd_irq; | |
2790 | } | |
2791 | ||
2792 | err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), | |
2793 | rocker_event_irq_handler, 0, | |
2794 | rocker_driver_name, rocker); | |
2795 | if (err) { | |
2796 | dev_err(&pdev->dev, "cannot assign event irq\n"); | |
2797 | goto err_request_event_irq; | |
2798 | } | |
2799 | ||
c1bb279c IS |
2800 | rocker->rocker_owq = alloc_ordered_workqueue(rocker_driver_name, |
2801 | WQ_MEM_RECLAIM); | |
2802 | if (!rocker->rocker_owq) { | |
2803 | err = -ENOMEM; | |
2804 | goto err_alloc_ordered_workqueue; | |
2805 | } | |
2806 | ||
c3852ef7 IS |
2807 | /* Only FIBs pointing to our own netdevs are programmed into |
2808 | * the device, so no need to pass a callback. | |
2809 | */ | |
17f8be7d | 2810 | rocker->fib_nb.notifier_call = rocker_router_fib_event; |
c3852ef7 IS |
2811 | err = register_fib_notifier(&rocker->fib_nb, NULL); |
2812 | if (err) | |
2813 | goto err_register_fib_notifier; | |
17f8be7d | 2814 | |
4b8ac966 JP |
2815 | rocker->hw.id = rocker_read64(rocker, SWITCH_ID); |
2816 | ||
2817 | err = rocker_probe_ports(rocker); | |
2818 | if (err) { | |
2819 | dev_err(&pdev->dev, "failed to probe ports\n"); | |
2820 | goto err_probe_ports; | |
2821 | } | |
2822 | ||
c8beb5b2 SF |
2823 | dev_info(&pdev->dev, "Rocker switch with id %*phN\n", |
2824 | (int)sizeof(rocker->hw.id), &rocker->hw.id); | |
4b8ac966 JP |
2825 | |
2826 | return 0; | |
2827 | ||
2828 | err_probe_ports: | |
17f8be7d | 2829 | unregister_fib_notifier(&rocker->fib_nb); |
c3852ef7 | 2830 | err_register_fib_notifier: |
c1bb279c IS |
2831 | destroy_workqueue(rocker->rocker_owq); |
2832 | err_alloc_ordered_workqueue: | |
4b8ac966 JP |
2833 | free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); |
2834 | err_request_event_irq: | |
2835 | free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); | |
2836 | err_request_cmd_irq: | |
2837 | rocker_dma_rings_fini(rocker); | |
2838 | err_dma_rings_init: | |
2839 | err_basic_hw_test: | |
2840 | rocker_msix_fini(rocker); | |
2841 | err_msix_init: | |
2842 | iounmap(rocker->hw_addr); | |
2843 | err_ioremap: | |
2844 | err_pci_resource_len_check: | |
2845 | err_pci_set_dma_mask: | |
2846 | pci_release_regions(pdev); | |
2847 | err_pci_request_regions: | |
2848 | pci_disable_device(pdev); | |
2849 | err_pci_enable_device: | |
2850 | kfree(rocker); | |
2851 | return err; | |
2852 | } | |
2853 | ||
2854 | static void rocker_remove(struct pci_dev *pdev) | |
2855 | { | |
2856 | struct rocker *rocker = pci_get_drvdata(pdev); | |
2857 | ||
17f8be7d | 2858 | rocker_remove_ports(rocker); |
936bd486 | 2859 | unregister_fib_notifier(&rocker->fib_nb); |
4b8ac966 | 2860 | rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); |
c1bb279c | 2861 | destroy_workqueue(rocker->rocker_owq); |
4b8ac966 JP |
2862 | free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); |
2863 | free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); | |
2864 | rocker_dma_rings_fini(rocker); | |
2865 | rocker_msix_fini(rocker); | |
2866 | iounmap(rocker->hw_addr); | |
2867 | pci_release_regions(rocker->pdev); | |
2868 | pci_disable_device(rocker->pdev); | |
2869 | kfree(rocker); | |
2870 | } | |
2871 | ||
2872 | static struct pci_driver rocker_pci_driver = { | |
2873 | .name = rocker_driver_name, | |
2874 | .id_table = rocker_pci_id_table, | |
2875 | .probe = rocker_probe, | |
2876 | .remove = rocker_remove, | |
2877 | }; | |
2878 | ||
6c707945 SF |
2879 | /************************************ |
2880 | * Net device notifier event handler | |
2881 | ************************************/ | |
2882 | ||
e5054643 | 2883 | static bool rocker_port_dev_check(const struct net_device *dev) |
6c707945 SF |
2884 | { |
2885 | return dev->netdev_ops == &rocker_port_netdev_ops; | |
2886 | } | |
2887 | ||
936bd486 JP |
2888 | static bool rocker_port_dev_check_under(const struct net_device *dev, |
2889 | struct rocker *rocker) | |
2890 | { | |
2891 | struct rocker_port *rocker_port; | |
2892 | ||
2893 | if (!rocker_port_dev_check(dev)) | |
2894 | return false; | |
2895 | ||
2896 | rocker_port = netdev_priv(dev); | |
2897 | if (rocker_port->rocker != rocker) | |
2898 | return false; | |
2899 | ||
2900 | return true; | |
2901 | } | |
2902 | ||
cf2d6740 DA |
2903 | struct rocker_walk_data { |
2904 | struct rocker *rocker; | |
2905 | struct rocker_port *port; | |
2906 | }; | |
2907 | ||
2908 | static int rocker_lower_dev_walk(struct net_device *lower_dev, void *_data) | |
2909 | { | |
2910 | struct rocker_walk_data *data = _data; | |
2911 | int ret = 0; | |
2912 | ||
2913 | if (rocker_port_dev_check_under(lower_dev, data->rocker)) { | |
2914 | data->port = netdev_priv(lower_dev); | |
2915 | ret = 1; | |
2916 | } | |
2917 | ||
2918 | return ret; | |
2919 | } | |
2920 | ||
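| /* Find the rocker port under an upper device (e.g. a bridge) by | |
| * walking the lower device list, restricted to ports of this switch. | |
| */ | |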
936bd486 JP |
2921 | struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev, |
2922 | struct rocker *rocker) | |
2923 | { | |
cf2d6740 | 2924 | struct rocker_walk_data data; |
936bd486 JP |
2925 | |
2926 | if (rocker_port_dev_check_under(dev, rocker)) | |
2927 | return netdev_priv(dev); | |
2928 | ||
cf2d6740 DA |
2929 | data.rocker = rocker; |
2930 | data.port = NULL; | |
2931 | netdev_walk_all_lower_dev(dev, rocker_lower_dev_walk, &data); | |
2932 | ||
2933 | return data.port; | |
936bd486 JP |
2934 | } |
2935 | ||
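| /* Reflect master (e.g. bridge) enslave/release of a rocker port into | |
| * the world on NETDEV_CHANGEUPPER events. | |
| */ | |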
6c707945 SF |
2936 | static int rocker_netdevice_event(struct notifier_block *unused, |
2937 | unsigned long event, void *ptr) | |
2938 | { | |
686ed304 JP |
2939 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
2940 | struct netdev_notifier_changeupper_info *info; | |
2941 | struct rocker_port *rocker_port; | |
6c707945 SF |
2942 | int err; |
2943 | ||
686ed304 JP |
2944 | if (!rocker_port_dev_check(dev)) |
2945 | return NOTIFY_DONE; | |
2946 | ||
6c707945 SF |
2947 | switch (event) { |
2948 | case NETDEV_CHANGEUPPER: | |
686ed304 JP |
2949 | info = ptr; |
2950 | if (!info->master) | |
2951 | goto out; | |
2952 | rocker_port = netdev_priv(dev); | |
2953 | if (info->linking) { | |
e420114e JP |
2954 | err = rocker_world_port_master_linked(rocker_port, |
2955 | info->upper_dev); | |
2956 | if (err) | |
2957 | netdev_warn(dev, "failed to reflect master linked (err %d)\n", | |
2958 | err); | |
686ed304 | 2959 | } else { |
e420114e JP |
2960 | err = rocker_world_port_master_unlinked(rocker_port, |
2961 | info->upper_dev); | |
2962 | if (err) | |
2963 | netdev_warn(dev, "failed to reflect master unlinked (err %d)\n", | |
2964 | err); | |
686ed304 | 2965 | } |
6c707945 | 2966 | } |
686ed304 | 2967 | out: |
6c707945 SF |
2968 | return NOTIFY_DONE; |
2969 | } | |
2970 | ||
2971 | static struct notifier_block rocker_netdevice_nb __read_mostly = { | |
2972 | .notifier_call = rocker_netdevice_event, | |
2973 | }; | |
2974 | ||
c1beeef7 SF |
2975 | /************************************ |
2976 | * Net event notifier event handler | |
2977 | ************************************/ | |
2978 | ||
c1beeef7 SF |
2979 | static int rocker_netevent_event(struct notifier_block *unused, |
2980 | unsigned long event, void *ptr) | |
2981 | { | |
e420114e | 2982 | struct rocker_port *rocker_port; |
c1beeef7 SF |
2983 | struct net_device *dev; |
2984 | struct neighbour *n = ptr; | |
2985 | int err; | |
2986 | ||
2987 | switch (event) { | |
2988 | case NETEVENT_NEIGH_UPDATE: | |
2989 | if (n->tbl != &arp_tbl) | |
2990 | return NOTIFY_DONE; | |
2991 | dev = n->dev; | |
2992 | if (!rocker_port_dev_check(dev)) | |
2993 | return NOTIFY_DONE; | |
e420114e JP |
2994 | rocker_port = netdev_priv(dev); |
2995 | err = rocker_world_port_neigh_update(rocker_port, n); | |
2996 | if (err) | |
2997 | netdev_warn(dev, "failed to handle neigh update (err %d)\n", | |
2998 | err); | |
c1beeef7 SF |
2999 | break; |
3000 | } | |
3001 | ||
3002 | return NOTIFY_DONE; | |
3003 | } | |
3004 | ||
3005 | static struct notifier_block rocker_netevent_nb __read_mostly = { | |
3006 | .notifier_call = rocker_netevent_event, | |
3007 | }; | |
3008 | ||
4b8ac966 JP |
3009 | /*********************** |
3010 | * Module init and exit | |
3011 | ***********************/ | |
3012 | ||
3013 | static int __init rocker_module_init(void) | |
3014 | { | |
6c707945 SF |
3015 | int err; |
3016 | ||
3017 | register_netdevice_notifier(&rocker_netdevice_nb); | |
c1beeef7 | 3018 | register_netevent_notifier(&rocker_netevent_nb); |
6c707945 SF |
3019 | err = pci_register_driver(&rocker_pci_driver); |
3020 | if (err) | |
3021 | goto err_pci_register_driver; | |
3022 | return 0; | |
3023 | ||
3024 | err_pci_register_driver: | |
a076e6bf | 3025 | unregister_netevent_notifier(&rocker_netevent_nb); |
6c707945 SF |
3026 | unregister_netdevice_notifier(&rocker_netdevice_nb); |
3027 | return err; | |
4b8ac966 JP |
3028 | } |
3029 | ||
3030 | static void __exit rocker_module_exit(void) | |
3031 | { | |
c1beeef7 | 3032 | unregister_netevent_notifier(&rocker_netevent_nb); |
6c707945 | 3033 | unregister_netdevice_notifier(&rocker_netdevice_nb); |
4b8ac966 JP |
3034 | pci_unregister_driver(&rocker_pci_driver); |
3035 | } | |
3036 | ||
3037 | module_init(rocker_module_init); | |
3038 | module_exit(rocker_module_exit); | |
3039 | ||
3040 | MODULE_LICENSE("GPL v2"); | |
3041 | MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>"); | |
3042 | MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>"); | |
3043 | MODULE_DESCRIPTION("Rocker switch device driver"); | |
3044 | MODULE_DEVICE_TABLE(pci, rocker_pci_id_table); |