/*
 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/sort.h>
#include <linux/random.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>

#include "rocker.h"

static const char rocker_driver_name[] = "rocker";

static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
	{0, }
};

struct rocker_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;
		} vlan;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;
			int has_eth_dst_mask;
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};

struct rocker_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u64 cookie;
	struct rocker_flow_tbl_key key;
	size_t key_len;
	u32 key_crc32; /* key */
};

struct rocker_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};

struct rocker_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;
	struct rocker_fdb_tbl_key {
		u32 pport;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};

struct rocker_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};

struct rocker_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;
	u8 eth_dst[ETH_ALEN];
	bool ttl_check;
};

struct rocker_desc_info {
	char *data; /* mapped */
	size_t data_size;
	size_t tlv_size;
	struct rocker_desc *desc;
	DEFINE_DMA_UNMAP_ADDR(mapaddr);
};

struct rocker_dma_ring_info {
	size_t size;
	u32 head;
	u32 tail;
	struct rocker_desc *desc; /* mapped */
	dma_addr_t mapaddr;
	struct rocker_desc_info *desc_info;
	unsigned int type;
};

struct rocker;

enum {
	ROCKER_CTRL_LINK_LOCAL_MCAST,
	ROCKER_CTRL_LOCAL_ARP,
	ROCKER_CTRL_IPV4_MCAST,
	ROCKER_CTRL_IPV6_MCAST,
	ROCKER_CTRL_DFLT_BRIDGING,
	ROCKER_CTRL_MAX,
};

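/* Untagged traffic on a port is mapped to a per-interface "internal" VLAN
 * taken from a pool of ROCKER_N_INTERNAL_VLANS ids starting at
 * ROCKER_INTERNAL_VLAN_ID_BASE; see rocker_vlan_id_is_internal() and
 * rocker_port_vid_to_vlan() below.
 */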
#define ROCKER_INTERNAL_VLAN_ID_BASE	0x0f00
#define ROCKER_N_INTERNAL_VLANS		255
#define ROCKER_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define ROCKER_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)

struct rocker_port {
	struct net_device *dev;
	struct net_device *bridge_dev;
	struct rocker *rocker;
	unsigned int port_number;
	u32 pport;
	__be16 internal_vlan_id;
	int stp_state;
	u32 brport_flags;
	bool ctrls[ROCKER_CTRL_MAX];
	unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
	struct napi_struct napi_tx;
	struct napi_struct napi_rx;
	struct rocker_dma_ring_info tx_ring;
	struct rocker_dma_ring_info rx_ring;
};

struct rocker {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	struct msix_entry *msix_entries;
	unsigned int port_count;
	struct rocker_port **ports;
	struct {
		u64 id;
	} hw;
	spinlock_t cmd_ring_lock;
	struct rocker_dma_ring_info cmd_ring;
	struct rocker_dma_ring_info event_ring;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;
	u64 flow_tbl_next_cookie;
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;
	unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;
	u32 neigh_tbl_next_index;
};

static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };

/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */

enum {
	ROCKER_PRIORITY_UNKNOWN = 0,
	ROCKER_PRIORITY_IG_PORT = 1,
	ROCKER_PRIORITY_VLAN = 1,
	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
	ROCKER_PRIORITY_ACL_CTRL = 3,
	ROCKER_PRIORITY_ACL_NORMAL = 2,
	ROCKER_PRIORITY_ACL_DFLT = 1,
};

static bool rocker_vlan_id_is_internal(__be16 vlan_id)
{
	u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
	u16 end = 0xffe;
	u16 _vlan_id = ntohs(vlan_id);

	return (_vlan_id >= start && _vlan_id <= end);
}

static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
				      u16 vid, bool *pop_vlan)
{
	__be16 vlan_id;

	if (pop_vlan)
		*pop_vlan = false;
	vlan_id = htons(vid);
	if (!vlan_id) {
		vlan_id = rocker_port->internal_vlan_id;
		if (pop_vlan)
			*pop_vlan = true;
	}

	return vlan_id;
}

static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
				   __be16 vlan_id)
{
	if (rocker_vlan_id_is_internal(vlan_id))
		return 0;

	return ntohs(vlan_id);
}

static bool rocker_port_is_bridged(struct rocker_port *rocker_port)
{
	return !!rocker_port->bridge_dev;
}

struct rocker_wait {
	wait_queue_head_t wait;
	bool done;
	bool nowait;
};

static void rocker_wait_reset(struct rocker_wait *wait)
{
	wait->done = false;
	wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
	init_waitqueue_head(&wait->wait);
	rocker_wait_reset(wait);
}

static struct rocker_wait *rocker_wait_create(gfp_t gfp)
{
	struct rocker_wait *wait;

	wait = kmalloc(sizeof(*wait), gfp);
	if (!wait)
		return NULL;
	rocker_wait_init(wait);
	return wait;
}

static void rocker_wait_destroy(struct rocker_wait *work)
{
	kfree(work);
}

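/* Note: the caller-supplied timeout argument is currently unused; the wait
 * below always uses a fixed HZ / 10 timeout and reports whether the done
 * flag was set.
 */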
static bool rocker_wait_event_timeout(struct rocker_wait *wait,
				      unsigned long timeout)
{
	wait_event_timeout(wait->wait, wait->done, HZ / 10);
	if (!wait->done)
		return false;
	return true;
}

static void rocker_wait_wake_up(struct rocker_wait *wait)
{
	wait->done = true;
	wake_up(&wait->wait);
}

static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector)
{
	return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}

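/* MMIO register accessors.  The register name is given without its
 * ROCKER_ prefix; the macros paste it on, so e.g.
 * rocker_read32(rocker, TEST_REG) reads ROCKER_TEST_REG.
 */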
#define rocker_write32(rocker, reg, val)	\
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)	\
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)	\
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)	\
	readq((rocker)->hw_addr + (ROCKER_ ## reg))

/*****************************
 * HW basic testing functions
 *****************************/

static int rocker_reg_test(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	u64 test_reg;
	u64 rnd;

	rnd = prandom_u32();
	rnd >>= 1;
	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	rnd = prandom_u32();
	rnd <<= 31;
	rnd |= prandom_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	return 0;
}

static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
			       u32 test_type, dma_addr_t dma_handle,
			       unsigned char *buf, unsigned char *expect,
			       size_t size)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;

	rocker_wait_reset(wait);
	rocker_write32(rocker, TEST_DMA_CTRL, test_type);

	if (!rocker_wait_event_timeout(wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		return -EIO;
	}

	for (i = 0; i < size; i++) {
		if (buf[i] != expect[i]) {
			dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n",
				buf[i], i, expect[i]);
			return -EIO;
		}
	}
	return 0;
}

#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96

static int rocker_dma_test_offset(struct rocker *rocker,
				  struct rocker_wait *wait, int offset)
{
	struct pci_dev *pdev = rocker->pdev;
	unsigned char *alloc;
	unsigned char *buf;
	unsigned char *expect;
	dma_addr_t dma_handle;
	int i;
	int err;

	alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
			GFP_KERNEL | GFP_DMA);
	if (!alloc)
		return -ENOMEM;
	buf = alloc + offset;
	expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

	dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, dma_handle)) {
		err = -EIO;
		goto free_alloc;
	}

	rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
	rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

	memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
	for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
		expect[i] = ~buf[i];
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

unmap:
	pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
			 PCI_DMA_BIDIRECTIONAL);
free_alloc:
	kfree(alloc);

	return err;
}

static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait)
{
	int i;
	int err;

	for (i = 0; i < 8; i++) {
		err = rocker_dma_test_offset(rocker, wait, i);
		if (err)
			return err;
	}
	return 0;
}

static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
	struct rocker_wait *wait = dev_id;

	rocker_wait_wake_up(wait);

	return IRQ_HANDLED;
}

static int rocker_basic_hw_test(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	struct rocker_wait wait;
	int err;

	err = rocker_reg_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "reg test failed\n");
		return err;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
			  rocker_test_irq_handler, 0,
			  rocker_driver_name, &wait);
	if (err) {
		dev_err(&pdev->dev, "cannot assign test irq\n");
		return err;
	}

	rocker_wait_init(&wait);
	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

	if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		err = -EIO;
		goto free_irq;
	}

	err = rocker_dma_test(rocker, &wait);
	if (err)
		dev_err(&pdev->dev, "dma test failed\n");

free_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
	return err;
}

/******
 * TLV
 ******/

#define ROCKER_TLV_ALIGNTO 8U
#define ROCKER_TLV_ALIGN(len) \
	(((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))

/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 * |           Header            | Pad |           Payload           | Pad |
 * |     (struct rocker_tlv)     | ing |                             | ing |
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 *  <--------------------------- tlv->len -------------------------->
 */

static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
					  int *remaining)
{
	int totlen = ROCKER_TLV_ALIGN(tlv->len);

	*remaining -= totlen;
	return (struct rocker_tlv *) ((char *) tlv + totlen);
}

static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
{
	return remaining >= (int) ROCKER_TLV_HDRLEN &&
	       tlv->len >= ROCKER_TLV_HDRLEN &&
	       tlv->len <= remaining;
}

#define rocker_tlv_for_each(pos, head, len, rem)	\
	for (pos = head, rem = len;			\
	     rocker_tlv_ok(pos, rem);			\
	     pos = rocker_tlv_next(pos, &(rem)))

#define rocker_tlv_for_each_nested(pos, tlv, rem)	\
	rocker_tlv_for_each(pos, rocker_tlv_data(tlv),	\
			    rocker_tlv_len(tlv), rem)

static int rocker_tlv_attr_size(int payload)
{
	return ROCKER_TLV_HDRLEN + payload;
}

static int rocker_tlv_total_size(int payload)
{
	return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
}

static int rocker_tlv_padlen(int payload)
{
	return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
}

static int rocker_tlv_type(const struct rocker_tlv *tlv)
{
	return tlv->type;
}

static void *rocker_tlv_data(const struct rocker_tlv *tlv)
{
	return (char *) tlv + ROCKER_TLV_HDRLEN;
}

static int rocker_tlv_len(const struct rocker_tlv *tlv)
{
	return tlv->len - ROCKER_TLV_HDRLEN;
}

static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
{
	return *(u8 *) rocker_tlv_data(tlv);
}

static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
{
	return *(u16 *) rocker_tlv_data(tlv);
}

static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
{
	return *(__be16 *) rocker_tlv_data(tlv);
}

static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
{
	return *(u32 *) rocker_tlv_data(tlv);
}

static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
{
	return *(u64 *) rocker_tlv_data(tlv);
}

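/* Parse a flat TLV buffer into a table indexed by attribute type.
 * Types outside 1..maxtype are skipped and a repeated type overwrites
 * the earlier occurrence.
 */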
static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
			     const char *buf, int buf_len)
{
	const struct rocker_tlv *tlv;
	const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
	int rem;

	memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));

	rocker_tlv_for_each(tlv, head, buf_len, rem) {
		u32 type = rocker_tlv_type(tlv);

		if (type > 0 && type <= maxtype)
			tb[type] = (struct rocker_tlv *) tlv;
	}
}

static void rocker_tlv_parse_nested(struct rocker_tlv **tb, int maxtype,
				    const struct rocker_tlv *tlv)
{
	rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
			 rocker_tlv_len(tlv));
}

static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype,
				  struct rocker_desc_info *desc_info)
{
	rocker_tlv_parse(tb, maxtype, desc_info->data,
			 desc_info->desc->tlv_size);
}

static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
{
	return (struct rocker_tlv *) ((char *) desc_info->data +
				      desc_info->tlv_size);
}

static int rocker_tlv_put(struct rocker_desc_info *desc_info,
			  int attrtype, int attrlen, const void *data)
{
	int tail_room = desc_info->data_size - desc_info->tlv_size;
	int total_size = rocker_tlv_total_size(attrlen);
	struct rocker_tlv *tlv;

	if (unlikely(tail_room < total_size))
		return -EMSGSIZE;

	tlv = rocker_tlv_start(desc_info);
	desc_info->tlv_size += total_size;
	tlv->type = attrtype;
	tlv->len = rocker_tlv_attr_size(attrlen);
	memcpy(rocker_tlv_data(tlv), data, attrlen);
	memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
	return 0;
}

static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
			     int attrtype, u8 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
}

static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
			      int attrtype, u16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
}

static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
			       int attrtype, __be16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
}

static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
			      int attrtype, u32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
}

static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
			       int attrtype, __be32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
}

static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
			      int attrtype, u64 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
}

static struct rocker_tlv *
rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
{
	struct rocker_tlv *start = rocker_tlv_start(desc_info);

	if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
		return NULL;

	return start;
}

static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
				struct rocker_tlv *start)
{
	start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
}

static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
				   struct rocker_tlv *start)
{
	desc_info->tlv_size = (char *) start - desc_info->data;
}

/******************************************
 * DMA rings and descriptors manipulations
 ******************************************/

static u32 __pos_inc(u32 pos, size_t limit)
{
	return ++pos == limit ? 0 : pos;
}

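/* Translate the ROCKER_* completion code left in the descriptor by the
 * hardware into a negative errno.
 */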
static int rocker_desc_err(struct rocker_desc_info *desc_info)
{
	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

	switch (err) {
	case ROCKER_OK:
		return 0;
	case -ROCKER_ENOENT:
		return -ENOENT;
	case -ROCKER_ENXIO:
		return -ENXIO;
	case -ROCKER_ENOMEM:
		return -ENOMEM;
	case -ROCKER_EEXIST:
		return -EEXIST;
	case -ROCKER_EINVAL:
		return -EINVAL;
	case -ROCKER_EMSGSIZE:
		return -EMSGSIZE;
	case -ROCKER_ENOTSUP:
		return -EOPNOTSUPP;
	case -ROCKER_ENOBUFS:
		return -ENOBUFS;
	}

	return -EINVAL;
}

static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info)
{
	desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}

static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
{
	u32 comp_err = desc_info->desc->comp_err;

	return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}

static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
{
	return (void *)(uintptr_t)desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
				       void *ptr)
{
	desc_info->desc->cookie = (uintptr_t) ptr;
}

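/* Ring occupancy convention: one descriptor slot is always left unused, so
 * head == tail means "empty" and advancing head onto tail means "full".
 */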
static struct rocker_desc_info *
rocker_desc_head_get(struct rocker_dma_ring_info *info)
{
	static struct rocker_desc_info *desc_info;
	u32 head = __pos_inc(info->head, info->size);

	desc_info = &info->desc_info[info->head];
	if (head == info->tail)
		return NULL; /* ring full */
	desc_info->tlv_size = 0;
	return desc_info;
}

static void rocker_desc_commit(struct rocker_desc_info *desc_info)
{
	desc_info->desc->buf_size = desc_info->data_size;
	desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(struct rocker *rocker,
				 struct rocker_dma_ring_info *info,
				 struct rocker_desc_info *desc_info)
{
	u32 head = __pos_inc(info->head, info->size);

	BUG_ON(head == info->tail);
	rocker_desc_commit(desc_info);
	info->head = head;
	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}

static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
	static struct rocker_desc_info *desc_info;

	if (info->tail == info->head)
		return NULL; /* nothing to be done between head and tail */
	desc_info = &info->desc_info[info->tail];
	if (!rocker_desc_gen(desc_info))
		return NULL; /* gen bit not set, desc is not ready yet */
	info->tail = __pos_inc(info->tail, info->size);
	desc_info->tlv_size = desc_info->desc->tlv_size;
	return desc_info;
}

static void rocker_dma_ring_credits_set(struct rocker *rocker,
					struct rocker_dma_ring_info *info,
					u32 credits)
{
	if (credits)
		rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}

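/* Round a requested ring size up to a power of two and clamp it to the
 * supported ROCKER_DMA_SIZE_MIN..ROCKER_DMA_SIZE_MAX range.
 */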
static unsigned long rocker_dma_ring_size_fix(size_t size)
{
	return max(ROCKER_DMA_SIZE_MIN,
		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}

static int rocker_dma_ring_create(struct rocker *rocker,
				  unsigned int type,
				  size_t size,
				  struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(size != rocker_dma_ring_size_fix(size));
	info->size = size;
	info->type = type;
	info->head = 0;
	info->tail = 0;
	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
				  GFP_KERNEL);
	if (!info->desc_info)
		return -ENOMEM;

	info->desc = pci_alloc_consistent(rocker->pdev,
					  info->size * sizeof(*info->desc),
					  &info->mapaddr);
	if (!info->desc) {
		kfree(info->desc_info);
		return -ENOMEM;
	}

	for (i = 0; i < info->size; i++)
		info->desc_info[i].desc = &info->desc[i];

	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
		       ROCKER_DMA_DESC_CTRL_RESET);
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

	return 0;
}

static void rocker_dma_ring_destroy(struct rocker *rocker,
				    struct rocker_dma_ring_info *info)
{
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

	pci_free_consistent(rocker->pdev,
			    info->size * sizeof(struct rocker_desc),
			    info->desc, info->mapaddr);
	kfree(info->desc_info);
}

static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
					     struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(info->head || info->tail);

	/* When ring is consumer, we need to advance head for each desc.
	 * That tells hw that the desc is ready to be used by it.
	 */
	for (i = 0; i < info->size - 1; i++)
		rocker_desc_head_set(rocker, info, &info->desc_info[i]);
	rocker_desc_commit(&info->desc_info[i]);
}

static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
				      struct rocker_dma_ring_info *info,
				      int direction, size_t buf_size)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;
	int err;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];
		dma_addr_t dma_handle;
		char *buf;

		buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
		if (!buf) {
			err = -ENOMEM;
			goto rollback;
		}

		dma_handle = pci_map_single(pdev, buf, buf_size, direction);
		if (pci_dma_mapping_error(pdev, dma_handle)) {
			kfree(buf);
			err = -EIO;
			goto rollback;
		}

		desc_info->data = buf;
		desc_info->data_size = buf_size;
		dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

		desc->buf_addr = dma_handle;
		desc->buf_size = buf_size;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];

		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
	return err;
}

static void rocker_dma_ring_bufs_free(struct rocker *rocker,
				      struct rocker_dma_ring_info *info,
				      int direction)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];

		desc->buf_addr = 0;
		desc->buf_size = 0;
		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
}

static int rocker_dma_rings_init(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
				     ROCKER_DMA_CMD_DEFAULT_SIZE,
				     &rocker->cmd_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create command dma ring\n");
		return err;
	}

	spin_lock_init(&rocker->cmd_ring_lock);

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
					 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
		goto err_dma_cmd_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
				     &rocker->event_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create event dma ring\n");
		goto err_dma_event_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
					 PCI_DMA_FROMDEVICE, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
		goto err_dma_event_ring_bufs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
	return 0;

err_dma_event_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
	return err;
}

static void rocker_dma_rings_fini(struct rocker *rocker)
{
	rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}

static int rocker_dma_rx_ring_skb_map(struct rocker *rocker,
				      struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      struct sk_buff *skb, size_t buf_len)
{
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;

	dma_handle = pci_map_single(pdev, skb->data, buf_len,
				    PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma_handle))
		return -EIO;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
		goto tlv_put_failure;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
		goto tlv_put_failure;
	return 0;

tlv_put_failure:
	pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
	desc_info->tlv_size = 0;
	return -EMSGSIZE;
}

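/* Rx buffers must be able to hold an MTU-sized frame plus the Ethernet
 * header, FCS and one VLAN tag.
 */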
static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port)
{
	return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}

static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
					struct rocker_port *rocker_port,
					struct rocker_desc_info *desc_info)
{
	struct net_device *dev = rocker_port->dev;
	struct sk_buff *skb;
	size_t buf_len = rocker_port_rx_buf_len(rocker_port);
	int err;

	/* Ensure that hw will see tlv_size zero in case of an error.
	 * That tells hw to use another descriptor.
	 */
	rocker_desc_cookie_ptr_set(desc_info, NULL);
	desc_info->tlv_size = 0;

	skb = netdev_alloc_skb_ip_align(dev, buf_len);
	if (!skb)
		return -ENOMEM;
	err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info,
					 skb, buf_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return err;
	}
	rocker_desc_cookie_ptr_set(desc_info, skb);
	return 0;
}

static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
					 struct rocker_tlv **attrs)
{
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	size_t len;

	if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
	    !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
		return;
	dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
	len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
	pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
}

static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
					struct rocker_desc_info *desc_info)
{
	struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);

	if (!skb)
		return;
	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	rocker_dma_rx_ring_skb_unmap(rocker, attrs);
	dev_kfree_skb_any(skb);
}

static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker,
					 struct rocker_port *rocker_port)
{
	struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	int i;
	int err;

	for (i = 0; i < rx_ring->size; i++) {
		err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port,
						   &rx_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
	return err;
}

static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker,
					 struct rocker_port *rocker_port)
{
	struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	int i;

	for (i = 0; i < rx_ring->size; i++)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
}

static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	int err;

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_TX(rocker_port->port_number),
				     ROCKER_DMA_TX_DEFAULT_SIZE,
				     &rocker_port->tx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
		return err;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
					 PCI_DMA_TODEVICE,
					 ROCKER_DMA_TX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
		goto err_dma_tx_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_RX(rocker_port->port_number),
				     ROCKER_DMA_RX_DEFAULT_SIZE,
				     &rocker_port->rx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
		goto err_dma_rx_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
					 PCI_DMA_BIDIRECTIONAL,
					 ROCKER_DMA_RX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
		goto err_dma_rx_ring_bufs_alloc;
	}

	err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
		goto err_dma_rx_ring_skbs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

	return 0;

err_dma_rx_ring_skbs_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
err_dma_tx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
	return err;
}

static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;

	rocker_dma_rx_ring_skbs_free(rocker, rocker_port);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}

static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
{
	u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);

	if (enable)
		val |= 1ULL << rocker_port->pport;
	else
		val &= ~(1ULL << rocker_port->pport);
	rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}

/********************************
 * Interrupt handler and helpers
 ********************************/

static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	u32 credits = 0;

	spin_lock(&rocker->cmd_ring_lock);
	while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
		wait = rocker_desc_cookie_ptr_get(desc_info);
		if (wait->nowait) {
			rocker_desc_gen_clear(desc_info);
			rocker_wait_destroy(wait);
		} else {
			rocker_wait_wake_up(wait);
		}
		credits++;
	}
	spin_unlock(&rocker->cmd_ring_lock);
	rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

	return IRQ_HANDLED;
}

static void rocker_port_link_up(struct rocker_port *rocker_port)
{
	netif_carrier_on(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is up\n");
}

static void rocker_port_link_down(struct rocker_port *rocker_port)
{
	netif_carrier_off(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is down\n");
}

static int rocker_event_link_change(struct rocker *rocker,
				    const struct rocker_tlv *info)
{
	struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
	unsigned int port_number;
	bool link_up;
	struct rocker_port *rocker_port;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
	link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];
	if (netif_carrier_ok(rocker_port->dev) != link_up) {
		if (link_up)
			rocker_port_link_up(rocker_port);
		else
			rocker_port_link_down(rocker_port);
	}

	return 0;
}

#define ROCKER_OP_FLAG_REMOVE		BIT(0)
#define ROCKER_OP_FLAG_NOWAIT		BIT(1)
#define ROCKER_OP_FLAG_LEARNED		BIT(2)
#define ROCKER_OP_FLAG_REFRESH		BIT(3)

static int rocker_port_fdb(struct rocker_port *rocker_port,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags);

static int rocker_event_mac_vlan_seen(struct rocker *rocker,
				      const struct rocker_tlv *info)
{
	struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
	unsigned int port_number;
	struct rocker_port *rocker_port;
	unsigned char *addr;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
	__be16 vlan_id;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
	addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
	vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		return 0;

	return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
}

static int rocker_event_process(struct rocker *rocker,
				struct rocker_desc_info *desc_info)
{
	struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
	struct rocker_tlv *info;
	u16 type;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
	if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
	    !attrs[ROCKER_TLV_EVENT_INFO])
		return -EIO;

	type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
	info = attrs[ROCKER_TLV_EVENT_INFO];

	switch (type) {
	case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
		return rocker_event_link_change(rocker, info);
	case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
		return rocker_event_mac_vlan_seen(rocker, info);
	}

	return -EOPNOTSUPP;
}

static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	struct pci_dev *pdev = rocker->pdev;
	struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			dev_err(&pdev->dev, "event desc received with err %d\n",
				err);
		} else {
			err = rocker_event_process(rocker, desc_info);
			if (err)
				dev_err(&pdev->dev, "event processing failed with err %d\n",
					err);
		}
		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
		credits++;
	}
	rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

	return IRQ_HANDLED;
}

static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_rx);
	return IRQ_HANDLED;
}

/********************
 * Command interface
 ********************/

typedef int (*rocker_cmd_cb_t)(struct rocker *rocker,
			       struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info,
			       void *priv);

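/* Send one command through the cmd ring: "prepare" fills the descriptor,
 * the completion interrupt wakes the waiter, and "process" (if given)
 * parses the response.  With nowait set, the command is fired and the
 * wait object is freed later from the cmd IRQ handler instead.
 */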
1495static int rocker_cmd_exec(struct rocker *rocker,
1496 struct rocker_port *rocker_port,
1497 rocker_cmd_cb_t prepare, void *prepare_priv,
1498 rocker_cmd_cb_t process, void *process_priv,
1499 bool nowait)
1500{
1501 struct rocker_desc_info *desc_info;
1502 struct rocker_wait *wait;
1503 unsigned long flags;
1504 int err;
1505
1506 wait = rocker_wait_create(nowait ? GFP_ATOMIC : GFP_KERNEL);
1507 if (!wait)
1508 return -ENOMEM;
1509 wait->nowait = nowait;
1510
1511 spin_lock_irqsave(&rocker->cmd_ring_lock, flags);
1512 desc_info = rocker_desc_head_get(&rocker->cmd_ring);
1513 if (!desc_info) {
1514 spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
1515 err = -EAGAIN;
1516 goto out;
1517 }
1518 err = prepare(rocker, rocker_port, desc_info, prepare_priv);
1519 if (err) {
1520 spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
1521 goto out;
1522 }
1523 rocker_desc_cookie_ptr_set(desc_info, wait);
1524 rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
1525 spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
1526
1527 if (nowait)
1528 return 0;
1529
1530 if (!rocker_wait_event_timeout(wait, HZ / 10))
1531 return -EIO;
1532
1533 err = rocker_desc_err(desc_info);
1534 if (err)
1535 return err;
1536
1537 if (process)
1538 err = process(rocker, rocker_port, desc_info, process_priv);
1539
1540 rocker_desc_gen_clear(desc_info);
1541out:
1542 rocker_wait_destroy(wait);
1543 return err;
1544}
1545
1546static int
1547rocker_cmd_get_port_settings_prep(struct rocker *rocker,
1548 struct rocker_port *rocker_port,
1549 struct rocker_desc_info *desc_info,
1550 void *priv)
1551{
1552 struct rocker_tlv *cmd_info;
1553
1554 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1555 ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1556 return -EMSGSIZE;
1557 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1558 if (!cmd_info)
1559 return -EMSGSIZE;
4a6bb6d3
SF
1560 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1561 rocker_port->pport))
4b8ac966
JP
1562 return -EMSGSIZE;
1563 rocker_tlv_nest_end(desc_info, cmd_info);
1564 return 0;
1565}
1566
1567static int
1568rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
1569 struct rocker_port *rocker_port,
1570 struct rocker_desc_info *desc_info,
1571 void *priv)
1572{
1573 struct ethtool_cmd *ecmd = priv;
1574 struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1575 struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1576 u32 speed;
1577 u8 duplex;
1578 u8 autoneg;
1579
1580 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1581 if (!attrs[ROCKER_TLV_CMD_INFO])
1582 return -EIO;
1583
1584 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1585 attrs[ROCKER_TLV_CMD_INFO]);
1586 if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1587 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1588 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1589 return -EIO;
1590
1591 speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1592 duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1593 autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1594
1595 ecmd->transceiver = XCVR_INTERNAL;
1596 ecmd->supported = SUPPORTED_TP;
1597 ecmd->phy_address = 0xff;
1598 ecmd->port = PORT_TP;
1599 ethtool_cmd_speed_set(ecmd, speed);
1600 ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1601 ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1602
1603 return 0;
1604}
1605
1606static int
1607rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker,
1608 struct rocker_port *rocker_port,
1609 struct rocker_desc_info *desc_info,
1610 void *priv)
1611{
1612 unsigned char *macaddr = priv;
1613 struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1614 struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1615 struct rocker_tlv *attr;
1616
1617 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1618 if (!attrs[ROCKER_TLV_CMD_INFO])
1619 return -EIO;
1620
1621 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1622 attrs[ROCKER_TLV_CMD_INFO]);
1623 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1624 if (!attr)
1625 return -EIO;
1626
1627 if (rocker_tlv_len(attr) != ETH_ALEN)
1628 return -EINVAL;
1629
1630 ether_addr_copy(macaddr, rocker_tlv_data(attr));
1631 return 0;
1632}
1633
db19170b
DA
1634struct port_name {
1635 char *buf;
1636 size_t len;
1637};
1638
1639static int
1640rocker_cmd_get_port_settings_phys_name_proc(struct rocker *rocker,
1641 struct rocker_port *rocker_port,
1642 struct rocker_desc_info *desc_info,
1643 void *priv)
1644{
1645 struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1646 struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1647 struct port_name *name = priv;
1648 struct rocker_tlv *attr;
1649 size_t i, j, len;
1650 char *str;
1651
1652 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1653 if (!attrs[ROCKER_TLV_CMD_INFO])
1654 return -EIO;
1655
1656 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1657 attrs[ROCKER_TLV_CMD_INFO]);
1658 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
1659 if (!attr)
1660 return -EIO;
1661
1662 len = min_t(size_t, rocker_tlv_len(attr), name->len);
1663 str = rocker_tlv_data(attr);
1664
1665 /* make sure name only contains alphanumeric characters */
1666 for (i = j = 0; i < len; ++i) {
1667 if (isalnum(str[i])) {
1668 name->buf[j] = str[i];
1669 j++;
1670 }
1671 }
1672
1673 if (j == 0)
1674 return -EIO;
1675
1676 name->buf[j] = '\0';
1677
1678 return 0;
1679}
1680
4b8ac966
JP
1681static int
1682rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
1683 struct rocker_port *rocker_port,
1684 struct rocker_desc_info *desc_info,
1685 void *priv)
1686{
1687 struct ethtool_cmd *ecmd = priv;
1688 struct rocker_tlv *cmd_info;
1689
1690 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1691 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1692 return -EMSGSIZE;
1693 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1694 if (!cmd_info)
1695 return -EMSGSIZE;
4a6bb6d3
SF
1696 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1697 rocker_port->pport))
4b8ac966
JP
1698 return -EMSGSIZE;
1699 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1700 ethtool_cmd_speed(ecmd)))
1701 return -EMSGSIZE;
1702 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1703 ecmd->duplex))
1704 return -EMSGSIZE;
1705 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1706 ecmd->autoneg))
1707 return -EMSGSIZE;
1708 rocker_tlv_nest_end(desc_info, cmd_info);
1709 return 0;
1710}
1711
1712static int
1713rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
1714 struct rocker_port *rocker_port,
1715 struct rocker_desc_info *desc_info,
1716 void *priv)
1717{
1718 unsigned char *macaddr = priv;
1719 struct rocker_tlv *cmd_info;
1720
1721 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1722 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1723 return -EMSGSIZE;
1724 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1725 if (!cmd_info)
1726 return -EMSGSIZE;
4a6bb6d3
SF
1727 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1728 rocker_port->pport))
4b8ac966
JP
1729 return -EMSGSIZE;
1730 if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1731 ETH_ALEN, macaddr))
1732 return -EMSGSIZE;
1733 rocker_tlv_nest_end(desc_info, cmd_info);
1734 return 0;
1735}
1736
5111f80c
SF
1737static int
1738rocker_cmd_set_port_learning_prep(struct rocker *rocker,
1739 struct rocker_port *rocker_port,
1740 struct rocker_desc_info *desc_info,
1741 void *priv)
1742{
1743 struct rocker_tlv *cmd_info;
1744
1745 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1746 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1747 return -EMSGSIZE;
1748 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1749 if (!cmd_info)
1750 return -EMSGSIZE;
4a6bb6d3
SF
1751 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1752 rocker_port->pport))
5111f80c
SF
1753 return -EMSGSIZE;
1754 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
1755 !!(rocker_port->brport_flags & BR_LEARNING)))
1756 return -EMSGSIZE;
1757 rocker_tlv_nest_end(desc_info, cmd_info);
1758 return 0;
1759}
1760
4b8ac966
JP
1761static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1762 struct ethtool_cmd *ecmd)
1763{
1764 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1765 rocker_cmd_get_port_settings_prep, NULL,
1766 rocker_cmd_get_port_settings_ethtool_proc,
1767 ecmd, false);
1768}
1769
1770static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1771 unsigned char *macaddr)
1772{
1773 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1774 rocker_cmd_get_port_settings_prep, NULL,
1775 rocker_cmd_get_port_settings_macaddr_proc,
1776 macaddr, false);
1777}
1778
1779static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1780 struct ethtool_cmd *ecmd)
1781{
1782 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1783 rocker_cmd_set_port_settings_ethtool_prep,
1784 ecmd, NULL, NULL, false);
1785}
1786
1787static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1788 unsigned char *macaddr)
1789{
1790 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1791 rocker_cmd_set_port_settings_macaddr_prep,
1792 macaddr, NULL, NULL, false);
1793}
1794
5111f80c
SF
1795static int rocker_port_set_learning(struct rocker_port *rocker_port)
1796{
1797 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1798 rocker_cmd_set_port_learning_prep,
1799 NULL, NULL, NULL, false);
1800}
1801
9f6bbf7c
SF
1802static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1803 struct rocker_flow_tbl_entry *entry)
1804{
4a6bb6d3
SF
1805 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1806 entry->key.ig_port.in_pport))
9f6bbf7c 1807 return -EMSGSIZE;
4a6bb6d3
SF
1808 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1809 entry->key.ig_port.in_pport_mask))
9f6bbf7c
SF
1810 return -EMSGSIZE;
1811 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1812 entry->key.ig_port.goto_tbl))
1813 return -EMSGSIZE;
1814
1815 return 0;
1816}
1817
1818static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
1819 struct rocker_flow_tbl_entry *entry)
1820{
4a6bb6d3
SF
1821 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1822 entry->key.vlan.in_pport))
9f6bbf7c 1823 return -EMSGSIZE;
9b03c71f
JP
1824 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1825 entry->key.vlan.vlan_id))
9f6bbf7c 1826 return -EMSGSIZE;
9b03c71f
JP
1827 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1828 entry->key.vlan.vlan_id_mask))
9f6bbf7c
SF
1829 return -EMSGSIZE;
1830 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1831 entry->key.vlan.goto_tbl))
1832 return -EMSGSIZE;
1833 if (entry->key.vlan.untagged &&
9b03c71f
JP
1834 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
1835 entry->key.vlan.new_vlan_id))
9f6bbf7c
SF
1836 return -EMSGSIZE;
1837
1838 return 0;
1839}
1840
1841static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
1842 struct rocker_flow_tbl_entry *entry)
1843{
4a6bb6d3
SF
1844 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1845 entry->key.term_mac.in_pport))
9f6bbf7c 1846 return -EMSGSIZE;
4a6bb6d3
SF
1847 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1848 entry->key.term_mac.in_pport_mask))
9f6bbf7c 1849 return -EMSGSIZE;
9b03c71f
JP
1850 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1851 entry->key.term_mac.eth_type))
9f6bbf7c
SF
1852 return -EMSGSIZE;
1853 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1854 ETH_ALEN, entry->key.term_mac.eth_dst))
1855 return -EMSGSIZE;
1856 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1857 ETH_ALEN, entry->key.term_mac.eth_dst_mask))
1858 return -EMSGSIZE;
9b03c71f
JP
1859 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1860 entry->key.term_mac.vlan_id))
9f6bbf7c 1861 return -EMSGSIZE;
9b03c71f
JP
1862 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1863 entry->key.term_mac.vlan_id_mask))
9f6bbf7c
SF
1864 return -EMSGSIZE;
1865 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1866 entry->key.term_mac.goto_tbl))
1867 return -EMSGSIZE;
1868 if (entry->key.term_mac.copy_to_cpu &&
1869 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1870 entry->key.term_mac.copy_to_cpu))
1871 return -EMSGSIZE;
1872
1873 return 0;
1874}
1875
1876static int
1877rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
1878 struct rocker_flow_tbl_entry *entry)
1879{
9b03c71f
JP
1880 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1881 entry->key.ucast_routing.eth_type))
9f6bbf7c 1882 return -EMSGSIZE;
9b03c71f
JP
1883 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
1884 entry->key.ucast_routing.dst4))
1885 return -EMSGSIZE;
1886 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
1887 entry->key.ucast_routing.dst4_mask))
1888 return -EMSGSIZE;
1889 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1890 entry->key.ucast_routing.goto_tbl))
1891 return -EMSGSIZE;
1892 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1893 entry->key.ucast_routing.group_id))
1894 return -EMSGSIZE;
1895
1896 return 0;
1897}
1898
1899static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
1900 struct rocker_flow_tbl_entry *entry)
1901{
1902 if (entry->key.bridge.has_eth_dst &&
1903 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1904 ETH_ALEN, entry->key.bridge.eth_dst))
1905 return -EMSGSIZE;
1906 if (entry->key.bridge.has_eth_dst_mask &&
1907 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1908 ETH_ALEN, entry->key.bridge.eth_dst_mask))
1909 return -EMSGSIZE;
1910 if (entry->key.bridge.vlan_id &&
1911 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1912 entry->key.bridge.vlan_id))
1913 return -EMSGSIZE;
1914 if (entry->key.bridge.tunnel_id &&
1915 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
1916 entry->key.bridge.tunnel_id))
1917 return -EMSGSIZE;
1918 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1919 entry->key.bridge.goto_tbl))
1920 return -EMSGSIZE;
1921 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1922 entry->key.bridge.group_id))
1923 return -EMSGSIZE;
1924 if (entry->key.bridge.copy_to_cpu &&
1925 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1926 entry->key.bridge.copy_to_cpu))
1927 return -EMSGSIZE;
1928
1929 return 0;
1930}
1931
1932static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
1933 struct rocker_flow_tbl_entry *entry)
1934{
1935 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1936 entry->key.acl.in_pport))
1937 return -EMSGSIZE;
1938 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1939 entry->key.acl.in_pport_mask))
1940 return -EMSGSIZE;
1941 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
1942 ETH_ALEN, entry->key.acl.eth_src))
1943 return -EMSGSIZE;
1944 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
1945 ETH_ALEN, entry->key.acl.eth_src_mask))
1946 return -EMSGSIZE;
1947 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1948 ETH_ALEN, entry->key.acl.eth_dst))
1949 return -EMSGSIZE;
1950 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1951 ETH_ALEN, entry->key.acl.eth_dst_mask))
1952 return -EMSGSIZE;
1953 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1954 entry->key.acl.eth_type))
1955 return -EMSGSIZE;
1956 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1957 entry->key.acl.vlan_id))
1958 return -EMSGSIZE;
1959 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1960 entry->key.acl.vlan_id_mask))
1961 return -EMSGSIZE;
1962
1963 switch (ntohs(entry->key.acl.eth_type)) {
1964 case ETH_P_IP:
1965 case ETH_P_IPV6:
1966 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
1967 entry->key.acl.ip_proto))
1968 return -EMSGSIZE;
1969 if (rocker_tlv_put_u8(desc_info,
1970 ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
1971 entry->key.acl.ip_proto_mask))
1972 return -EMSGSIZE;
1973 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
1974 entry->key.acl.ip_tos & 0x3f))
1975 return -EMSGSIZE;
1976 if (rocker_tlv_put_u8(desc_info,
1977 ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
1978 entry->key.acl.ip_tos_mask & 0x3f))
1979 return -EMSGSIZE;
1980 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
1981 (entry->key.acl.ip_tos & 0xc0) >> 6))
1982 return -EMSGSIZE;
1983 if (rocker_tlv_put_u8(desc_info,
1984 ROCKER_TLV_OF_DPA_IP_ECN_MASK,
1985 (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
1986 return -EMSGSIZE;
1987 break;
1988 }
1989
1990 if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
1991 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1992 entry->key.acl.group_id))
1993 return -EMSGSIZE;
1994
1995 return 0;
1996}
1997
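/* Encode a flow add/mod command: the command-type TLV first, then a nested
 * ROCKER_TLV_CMD_INFO block carrying table id, priority, hard timeout and
 * cookie, followed by the table-specific match/action TLVs.
 */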
1998static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
1999 struct rocker_port *rocker_port,
2000 struct rocker_desc_info *desc_info,
2001 void *priv)
2002{
2003 struct rocker_flow_tbl_entry *entry = priv;
2004 struct rocker_tlv *cmd_info;
2005 int err = 0;
2006
2007 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2008 return -EMSGSIZE;
2009 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2010 if (!cmd_info)
2011 return -EMSGSIZE;
2012 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
2013 entry->key.tbl_id))
2014 return -EMSGSIZE;
2015 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
2016 entry->key.priority))
2017 return -EMSGSIZE;
2018 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
2019 return -EMSGSIZE;
2020 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2021 entry->cookie))
2022 return -EMSGSIZE;
2023
2024 switch (entry->key.tbl_id) {
2025 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
2026 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
2027 break;
2028 case ROCKER_OF_DPA_TABLE_ID_VLAN:
2029 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
2030 break;
2031 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
2032 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
2033 break;
2034 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
2035 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
2036 break;
2037 case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
2038 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
2039 break;
2040 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
2041 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
2042 break;
2043 default:
2044 err = -ENOTSUPP;
2045 break;
2046 }
2047
2048 if (err)
2049 return err;
2050
2051 rocker_tlv_nest_end(desc_info, cmd_info);
2052
2053 return 0;
2054}
2055
2056static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
2057 struct rocker_port *rocker_port,
2058 struct rocker_desc_info *desc_info,
2059 void *priv)
2060{
2061 const struct rocker_flow_tbl_entry *entry = priv;
2062 struct rocker_tlv *cmd_info;
2063
2064 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2065 return -EMSGSIZE;
2066 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2067 if (!cmd_info)
2068 return -EMSGSIZE;
2069 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2070 entry->cookie))
2071 return -EMSGSIZE;
2072 rocker_tlv_nest_end(desc_info, cmd_info);
2073
2074 return 0;
2075}
2076
2077static int
2078rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2079 struct rocker_group_tbl_entry *entry)
2080{
2081 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
2082 ROCKER_GROUP_PORT_GET(entry->group_id)))
2083 return -EMSGSIZE;
2084 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2085 entry->l2_interface.pop_vlan))
2086 return -EMSGSIZE;
2087
2088 return 0;
2089}
2090
2091static int
2092rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
2093 struct rocker_group_tbl_entry *entry)
2094{
2095 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2096 entry->l2_rewrite.group_id))
2097 return -EMSGSIZE;
2098 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2099 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2100 ETH_ALEN, entry->l2_rewrite.eth_src))
2101 return -EMSGSIZE;
2102 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2103 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2104 ETH_ALEN, entry->l2_rewrite.eth_dst))
2105 return -EMSGSIZE;
2106 if (entry->l2_rewrite.vlan_id &&
2107 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2108 entry->l2_rewrite.vlan_id))
2109 return -EMSGSIZE;
2110
2111 return 0;
2112}
2113
2114static int
2115rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
2116 struct rocker_group_tbl_entry *entry)
2117{
2118 int i;
2119 struct rocker_tlv *group_ids;
2120
2121 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2122 entry->group_count))
2123 return -EMSGSIZE;
2124
2125 group_ids = rocker_tlv_nest_start(desc_info,
2126 ROCKER_TLV_OF_DPA_GROUP_IDS);
2127 if (!group_ids)
2128 return -EMSGSIZE;
2129
2130 for (i = 0; i < entry->group_count; i++)
2131 /* Note TLV array is 1-based */
2132 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2133 return -EMSGSIZE;
2134
2135 rocker_tlv_nest_end(desc_info, group_ids);
2136
2137 return 0;
2138}
2139
2140static int
2141rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
2142 struct rocker_group_tbl_entry *entry)
2143{
2144 if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2145 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2146 ETH_ALEN, entry->l3_unicast.eth_src))
2147 return -EMSGSIZE;
2148 if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2149 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2150 ETH_ALEN, entry->l3_unicast.eth_dst))
2151 return -EMSGSIZE;
2152 if (entry->l3_unicast.vlan_id &&
2153 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2154 entry->l3_unicast.vlan_id))
2155 return -EMSGSIZE;
2156 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2157 entry->l3_unicast.ttl_check))
2158 return -EMSGSIZE;
2159 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2160 entry->l3_unicast.group_id))
2161 return -EMSGSIZE;
2162
2163 return 0;
2164}
2165
2166static int rocker_cmd_group_tbl_add(struct rocker *rocker,
2167 struct rocker_port *rocker_port,
2168 struct rocker_desc_info *desc_info,
2169 void *priv)
2170{
2171 struct rocker_group_tbl_entry *entry = priv;
2172 struct rocker_tlv *cmd_info;
2173 int err = 0;
2174
2175 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2176 return -EMSGSIZE;
2177 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2178 if (!cmd_info)
2179 return -EMSGSIZE;
2180
2181 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2182 entry->group_id))
2183 return -EMSGSIZE;
2184
2185 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2186 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2187 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2188 break;
2189 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2190 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2191 break;
2192 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2193 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2194 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2195 break;
2196 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2197 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2198 break;
2199 default:
2200 err = -ENOTSUPP;
2201 break;
2202 }
2203
2204 if (err)
2205 return err;
2206
2207 rocker_tlv_nest_end(desc_info, cmd_info);
2208
2209 return 0;
2210}
2211
2212static int rocker_cmd_group_tbl_del(struct rocker *rocker,
2213 struct rocker_port *rocker_port,
2214 struct rocker_desc_info *desc_info,
2215 void *priv)
2216{
2217 const struct rocker_group_tbl_entry *entry = priv;
2218 struct rocker_tlv *cmd_info;
2219
2220 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2221 return -EMSGSIZE;
2222 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2223 if (!cmd_info)
2224 return -EMSGSIZE;
2225 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2226 entry->group_id))
2227 return -EMSGSIZE;
2228 rocker_tlv_nest_end(desc_info, cmd_info);
2229
2230 return 0;
2231}
2232
2233/***************************************************
2234 * Flow, group, FDB, internal VLAN and neigh tables
2235 ***************************************************/
2236
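/* The driver shadows device state in five hash tables (flow, group, FDB,
 * internal VLAN and neigh), each protected by its own spinlock.
 */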
2237static int rocker_init_tbls(struct rocker *rocker)
2238{
2239 hash_init(rocker->flow_tbl);
2240 spin_lock_init(&rocker->flow_tbl_lock);
2241
2242 hash_init(rocker->group_tbl);
2243 spin_lock_init(&rocker->group_tbl_lock);
2244
2245 hash_init(rocker->fdb_tbl);
2246 spin_lock_init(&rocker->fdb_tbl_lock);
2247
2248 hash_init(rocker->internal_vlan_tbl);
2249 spin_lock_init(&rocker->internal_vlan_tbl_lock);
2250
2251 hash_init(rocker->neigh_tbl);
2252 spin_lock_init(&rocker->neigh_tbl_lock);
2253
2254 return 0;
2255}
2256
2257static void rocker_free_tbls(struct rocker *rocker)
2258{
2259 unsigned long flags;
2260 struct rocker_flow_tbl_entry *flow_entry;
2261 struct rocker_group_tbl_entry *group_entry;
2262 struct rocker_fdb_tbl_entry *fdb_entry;
2263 struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
2264 struct rocker_neigh_tbl_entry *neigh_entry;
2265 struct hlist_node *tmp;
2266 int bkt;
2267
2268 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2269 hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2270 hash_del(&flow_entry->entry);
2271 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2272
2273 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2274 hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2275 hash_del(&group_entry->entry);
2276 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2277
2278 spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2279 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2280 hash_del(&fdb_entry->entry);
2281 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2282
2283 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2284 hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2285 tmp, internal_vlan_entry, entry)
2286 hash_del(&internal_vlan_entry->entry);
2287 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
2288
2289 spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
2290 hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
2291 hash_del(&neigh_entry->entry);
2292 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
2293}
2294
2295static struct rocker_flow_tbl_entry *
2296rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
2297{
2298 struct rocker_flow_tbl_entry *found;
2299 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2300
2301 hash_for_each_possible(rocker->flow_tbl, found,
2302 entry, match->key_crc32) {
2303 if (memcmp(&found->key, &match->key, key_len) == 0)
2304 return found;
2305 }
2306
2307 return NULL;
2308}
2309
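/* Add or modify a flow entry. Entries are keyed by a CRC32 of the key; if a
 * shadow entry with the same key already exists its cookie is reused and a
 * FLOW_MOD is issued, otherwise a new cookie is allocated for a FLOW_ADD.
 */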
2310static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
2311 struct rocker_flow_tbl_entry *match,
2312 bool nowait)
2313{
2314 struct rocker *rocker = rocker_port->rocker;
2315 struct rocker_flow_tbl_entry *found;
2316 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2317 unsigned long flags;
2318
2319 match->key_crc32 = crc32(~0, &match->key, key_len);
2320
2321 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2322
2323 found = rocker_flow_tbl_find(rocker, match);
2324
2325 if (found) {
2326 match->cookie = found->cookie;
2327 hash_del(&found->entry);
2328 kfree(found);
2329 found = match;
2330 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
2331 } else {
2332 found = match;
2333 found->cookie = rocker->flow_tbl_next_cookie++;
2334 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
2335 }
2336
2337 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
2338
2339 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2340
2341 return rocker_cmd_exec(rocker, rocker_port,
2342 rocker_cmd_flow_tbl_add,
2343 found, NULL, NULL, nowait);
2344}
2345
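/* Delete a flow entry: a FLOW_DEL is only sent to the device if a matching
 * shadow entry exists; the caller-supplied match entry is always freed.
 */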
2346static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
2347 struct rocker_flow_tbl_entry *match,
2348 bool nowait)
2349{
2350 struct rocker *rocker = rocker_port->rocker;
2351 struct rocker_flow_tbl_entry *found;
2352 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2353 unsigned long flags;
2354 int err = 0;
2355
2356 match->key_crc32 = crc32(~0, &match->key, key_len);
2357
2358 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2359
2360 found = rocker_flow_tbl_find(rocker, match);
2361
2362 if (found) {
2363 hash_del(&found->entry);
2364 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
2365 }
2366
2367 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2368
2369 kfree(match);
2370
2371 if (found) {
2372 err = rocker_cmd_exec(rocker, rocker_port,
2373 rocker_cmd_flow_tbl_del,
2374 found, NULL, NULL, nowait);
2375 kfree(found);
2376 }
2377
2378 return err;
2379}
2380
2381static gfp_t rocker_op_flags_gfp(int flags)
2382{
2383 return flags & ROCKER_OP_FLAG_NOWAIT ? GFP_ATOMIC : GFP_KERNEL;
2384}
2385
2386static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
2387 int flags, struct rocker_flow_tbl_entry *entry)
2388{
2389 bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
2390
2391 if (flags & ROCKER_OP_FLAG_REMOVE)
2392 return rocker_flow_tbl_del(rocker_port, entry, nowait);
2393 else
2394 return rocker_flow_tbl_add(rocker_port, entry, nowait);
2395}
2396
2397static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
2398 int flags, u32 in_pport, u32 in_pport_mask,
2399 enum rocker_of_dpa_table_id goto_tbl)
2400{
2401 struct rocker_flow_tbl_entry *entry;
2402
2403 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2404 if (!entry)
2405 return -ENOMEM;
2406
2407 entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2408 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
2409 entry->key.ig_port.in_pport = in_pport;
2410 entry->key.ig_port.in_pport_mask = in_pport_mask;
2411 entry->key.ig_port.goto_tbl = goto_tbl;
2412
2413 return rocker_flow_tbl_do(rocker_port, flags, entry);
2414}
2415
2416static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
2417 int flags, u32 in_pport,
2418 __be16 vlan_id, __be16 vlan_id_mask,
2419 enum rocker_of_dpa_table_id goto_tbl,
2420 bool untagged, __be16 new_vlan_id)
2421{
2422 struct rocker_flow_tbl_entry *entry;
2423
2424 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2425 if (!entry)
2426 return -ENOMEM;
2427
2428 entry->key.priority = ROCKER_PRIORITY_VLAN;
2429 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
2430 entry->key.vlan.in_pport = in_pport;
2431 entry->key.vlan.vlan_id = vlan_id;
2432 entry->key.vlan.vlan_id_mask = vlan_id_mask;
2433 entry->key.vlan.goto_tbl = goto_tbl;
2434
2435 entry->key.vlan.untagged = untagged;
2436 entry->key.vlan.new_vlan_id = new_vlan_id;
2437
2438 return rocker_flow_tbl_do(rocker_port, flags, entry);
2439}
2440
2441static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
2442 u32 in_pport, u32 in_pport_mask,
2443 __be16 eth_type, const u8 *eth_dst,
2444 const u8 *eth_dst_mask, __be16 vlan_id,
2445 __be16 vlan_id_mask, bool copy_to_cpu,
2446 int flags)
2447{
2448 struct rocker_flow_tbl_entry *entry;
2449
2450 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2451 if (!entry)
2452 return -ENOMEM;
2453
2454 if (is_multicast_ether_addr(eth_dst)) {
2455 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2456 entry->key.term_mac.goto_tbl =
2457 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2458 } else {
2459 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2460 entry->key.term_mac.goto_tbl =
2461 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2462 }
2463
2464 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
2465 entry->key.term_mac.in_pport = in_pport;
2466 entry->key.term_mac.in_pport_mask = in_pport_mask;
2467 entry->key.term_mac.eth_type = eth_type;
2468 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2469 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2470 entry->key.term_mac.vlan_id = vlan_id;
2471 entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2472 entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2473
2474 return rocker_flow_tbl_do(rocker_port, flags, entry);
2475}
2476
2477static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
2478 int flags,
2479 const u8 *eth_dst, const u8 *eth_dst_mask,
2480 __be16 vlan_id, u32 tunnel_id,
2481 enum rocker_of_dpa_table_id goto_tbl,
2482 u32 group_id, bool copy_to_cpu)
2483{
2484 struct rocker_flow_tbl_entry *entry;
2485 u32 priority;
2486 bool vlan_bridging = !!vlan_id;
2487 bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
2488 bool wild = false;
2489
2490 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2491 if (!entry)
2492 return -ENOMEM;
2493
2494 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2495
2496 if (eth_dst) {
2497 entry->key.bridge.has_eth_dst = 1;
2498 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2499 }
2500 if (eth_dst_mask) {
2501 entry->key.bridge.has_eth_dst_mask = 1;
2502 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
2503 if (memcmp(eth_dst_mask, ff_mac, ETH_ALEN))
2504 wild = true;
2505 }
2506
2507 priority = ROCKER_PRIORITY_UNKNOWN;
2508 if (vlan_bridging && dflt && wild)
2509 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
2510 else if (vlan_bridging && dflt && !wild)
2511 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
2512 else if (vlan_bridging && !dflt)
2513 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
2514 else if (!vlan_bridging && dflt && wild)
2515 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
2516 else if (!vlan_bridging && dflt && !wild)
2517 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
2518 else if (!vlan_bridging && !dflt)
2519 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2520
2521 entry->key.priority = priority;
2522 entry->key.bridge.vlan_id = vlan_id;
2523 entry->key.bridge.tunnel_id = tunnel_id;
2524 entry->key.bridge.goto_tbl = goto_tbl;
2525 entry->key.bridge.group_id = group_id;
2526 entry->key.bridge.copy_to_cpu = copy_to_cpu;
2527
2528 return rocker_flow_tbl_do(rocker_port, flags, entry);
2529}
2530
2531static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
2532 __be16 eth_type, __be32 dst,
2533 __be32 dst_mask, u32 priority,
2534 enum rocker_of_dpa_table_id goto_tbl,
2535 u32 group_id, int flags)
2536{
2537 struct rocker_flow_tbl_entry *entry;
2538
2539 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2540 if (!entry)
2541 return -ENOMEM;
2542
2543 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2544 entry->key.priority = priority;
2545 entry->key.ucast_routing.eth_type = eth_type;
2546 entry->key.ucast_routing.dst4 = dst;
2547 entry->key.ucast_routing.dst4_mask = dst_mask;
2548 entry->key.ucast_routing.goto_tbl = goto_tbl;
2549 entry->key.ucast_routing.group_id = group_id;
2550 entry->key_len = offsetof(struct rocker_flow_tbl_key,
2551 ucast_routing.group_id);
2552
2553 return rocker_flow_tbl_do(rocker_port, flags, entry);
2554}
2555
2556static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
2557 int flags, u32 in_pport,
2558 u32 in_pport_mask,
2559 const u8 *eth_src, const u8 *eth_src_mask,
2560 const u8 *eth_dst, const u8 *eth_dst_mask,
2561 __be16 eth_type,
2562 __be16 vlan_id, __be16 vlan_id_mask,
2563 u8 ip_proto, u8 ip_proto_mask,
2564 u8 ip_tos, u8 ip_tos_mask,
2565 u32 group_id)
2566{
2567 u32 priority;
2568 struct rocker_flow_tbl_entry *entry;
2569
2570 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2571 if (!entry)
2572 return -ENOMEM;
2573
2574 priority = ROCKER_PRIORITY_ACL_NORMAL;
2575 if (eth_dst && eth_dst_mask) {
2576 if (memcmp(eth_dst_mask, mcast_mac, ETH_ALEN) == 0)
2577 priority = ROCKER_PRIORITY_ACL_DFLT;
2578 else if (is_link_local_ether_addr(eth_dst))
2579 priority = ROCKER_PRIORITY_ACL_CTRL;
2580 }
2581
2582 entry->key.priority = priority;
2583 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2584 entry->key.acl.in_pport = in_pport;
2585 entry->key.acl.in_pport_mask = in_pport_mask;
2586
2587 if (eth_src)
2588 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2589 if (eth_src_mask)
2590 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2591 if (eth_dst)
2592 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2593 if (eth_dst_mask)
2594 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2595
2596 entry->key.acl.eth_type = eth_type;
2597 entry->key.acl.vlan_id = vlan_id;
2598 entry->key.acl.vlan_id_mask = vlan_id_mask;
2599 entry->key.acl.ip_proto = ip_proto;
2600 entry->key.acl.ip_proto_mask = ip_proto_mask;
2601 entry->key.acl.ip_tos = ip_tos;
2602 entry->key.acl.ip_tos_mask = ip_tos_mask;
2603 entry->key.acl.group_id = group_id;
2604
2605 return rocker_flow_tbl_do(rocker_port, flags, entry);
2606}
2607
2608static struct rocker_group_tbl_entry *
2609rocker_group_tbl_find(struct rocker *rocker,
2610 struct rocker_group_tbl_entry *match)
2611{
2612 struct rocker_group_tbl_entry *found;
2613
2614 hash_for_each_possible(rocker->group_tbl, found,
2615 entry, match->group_id) {
2616 if (found->group_id == match->group_id)
2617 return found;
2618 }
2619
2620 return NULL;
2621}
2622
2623static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry)
2624{
2625 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2626 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2627 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2628 kfree(entry->group_ids);
2629 break;
2630 default:
2631 break;
2632 }
2633 kfree(entry);
2634}
2635
2636static int rocker_group_tbl_add(struct rocker_port *rocker_port,
2637 struct rocker_group_tbl_entry *match,
2638 bool nowait)
2639{
2640 struct rocker *rocker = rocker_port->rocker;
2641 struct rocker_group_tbl_entry *found;
2642 unsigned long flags;
2643
2644 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2645
2646 found = rocker_group_tbl_find(rocker, match);
2647
2648 if (found) {
2649 hash_del(&found->entry);
2650 rocker_group_tbl_entry_free(found);
2651 found = match;
2652 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2653 } else {
2654 found = match;
2655 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2656 }
2657
2658 hash_add(rocker->group_tbl, &found->entry, found->group_id);
2659
2660 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2661
2662 return rocker_cmd_exec(rocker, rocker_port,
2663 rocker_cmd_group_tbl_add,
2664 found, NULL, NULL, nowait);
2665}
2666
2667static int rocker_group_tbl_del(struct rocker_port *rocker_port,
2668 struct rocker_group_tbl_entry *match,
2669 bool nowait)
2670{
2671 struct rocker *rocker = rocker_port->rocker;
2672 struct rocker_group_tbl_entry *found;
2673 unsigned long flags;
2674 int err = 0;
2675
2676 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2677
2678 found = rocker_group_tbl_find(rocker, match);
2679
2680 if (found) {
2681 hash_del(&found->entry);
2682 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2683 }
2684
2685 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2686
2687 rocker_group_tbl_entry_free(match);
2688
2689 if (found) {
2690 err = rocker_cmd_exec(rocker, rocker_port,
2691 rocker_cmd_group_tbl_del,
2692 found, NULL, NULL, nowait);
2693 rocker_group_tbl_entry_free(found);
2694 }
2695
2696 return err;
2697}
2698
2699static int rocker_group_tbl_do(struct rocker_port *rocker_port,
2700 int flags, struct rocker_group_tbl_entry *entry)
2701{
2702 bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
2703
2704 if (flags & ROCKER_OP_FLAG_REMOVE)
2705 return rocker_group_tbl_del(rocker_port, entry, nowait);
2706 else
2707 return rocker_group_tbl_add(rocker_port, entry, nowait);
2708}
2709
2710static int rocker_group_l2_interface(struct rocker_port *rocker_port,
2711 int flags, __be16 vlan_id,
2712 u32 out_pport, int pop_vlan)
2713{
2714 struct rocker_group_tbl_entry *entry;
2715
2716 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2717 if (!entry)
2718 return -ENOMEM;
2719
2720 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
2721 entry->l2_interface.pop_vlan = pop_vlan;
2722
2723 return rocker_group_tbl_do(rocker_port, flags, entry);
2724}
2725
2726static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
2727 int flags, u8 group_count,
2728 u32 *group_ids, u32 group_id)
2729{
2730 struct rocker_group_tbl_entry *entry;
2731
2732 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2733 if (!entry)
2734 return -ENOMEM;
2735
2736 entry->group_id = group_id;
2737 entry->group_count = group_count;
2738
2739 entry->group_ids = kcalloc(group_count, sizeof(u32),
2740 rocker_op_flags_gfp(flags));
2741 if (!entry->group_ids) {
2742 kfree(entry);
2743 return -ENOMEM;
2744 }
2745 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2746
2747 return rocker_group_tbl_do(rocker_port, flags, entry);
2748}
2749
2750static int rocker_group_l2_flood(struct rocker_port *rocker_port,
2751 int flags, __be16 vlan_id,
2752 u8 group_count, u32 *group_ids,
2753 u32 group_id)
2754{
2755 return rocker_group_l2_fan_out(rocker_port, flags,
2756 group_count, group_ids,
2757 group_id);
2758}
2759
2760static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
2761 int flags, u32 index, u8 *src_mac,
2762 u8 *dst_mac, __be16 vlan_id,
2763 bool ttl_check, u32 pport)
2764{
2765 struct rocker_group_tbl_entry *entry;
2766
2767 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2768 if (!entry)
2769 return -ENOMEM;
2770
2771 entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
2772 if (src_mac)
2773 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
2774 if (dst_mac)
2775 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
2776 entry->l3_unicast.vlan_id = vlan_id;
2777 entry->l3_unicast.ttl_check = ttl_check;
2778 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
2779
2780 return rocker_group_tbl_do(rocker_port, flags, entry);
2781}
2782
2783static struct rocker_neigh_tbl_entry *
2784 rocker_neigh_tbl_find(struct rocker *rocker, __be32 ip_addr)
2785{
2786 struct rocker_neigh_tbl_entry *found;
2787
2788 hash_for_each_possible(rocker->neigh_tbl, found,
2789 entry, be32_to_cpu(ip_addr))
2790 if (found->ip_addr == ip_addr)
2791 return found;
2792
2793 return NULL;
2794}
2795
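/* Neigh table entries are reference counted: _rocker_neigh_add assigns the
 * next L3 unicast group index, _rocker_neigh_update either refreshes the
 * destination MAC or takes another reference, and _rocker_neigh_del frees
 * the entry on the last put.
 */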
2796static void _rocker_neigh_add(struct rocker *rocker,
2797 struct rocker_neigh_tbl_entry *entry)
2798{
2799 entry->index = rocker->neigh_tbl_next_index++;
2800 entry->ref_count++;
2801 hash_add(rocker->neigh_tbl, &entry->entry,
2802 be32_to_cpu(entry->ip_addr));
2803}
2804
2805static void _rocker_neigh_del(struct rocker *rocker,
2806 struct rocker_neigh_tbl_entry *entry)
2807{
2808 if (--entry->ref_count == 0) {
2809 hash_del(&entry->entry);
2810 kfree(entry);
2811 }
2812}
2813
2814static void _rocker_neigh_update(struct rocker *rocker,
2815 struct rocker_neigh_tbl_entry *entry,
2816 u8 *eth_dst, bool ttl_check)
2817{
2818 if (eth_dst) {
2819 ether_addr_copy(entry->eth_dst, eth_dst);
2820 entry->ttl_check = ttl_check;
2821 } else {
2822 entry->ref_count++;
2823 }
2824}
2825
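/* Install, update or remove the L3 unicast group and the /32 unicast route
 * for a resolved neighbor.
 */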
2826static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
2827 int flags, __be32 ip_addr, u8 *eth_dst)
2828{
2829 struct rocker *rocker = rocker_port->rocker;
2830 struct rocker_neigh_tbl_entry *entry;
2831 struct rocker_neigh_tbl_entry *found;
2832 unsigned long lock_flags;
2833 __be16 eth_type = htons(ETH_P_IP);
2834 enum rocker_of_dpa_table_id goto_tbl =
2835 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2836 u32 group_id;
2837 u32 priority = 0;
2838 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2839 bool updating;
2840 bool removing;
2841 int err = 0;
2842
2843 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2844 if (!entry)
2845 return -ENOMEM;
2846
2847 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
2848
2849 found = rocker_neigh_tbl_find(rocker, ip_addr);
2850
2851 updating = found && adding;
2852 removing = found && !adding;
2853 adding = !found && adding;
2854
2855 if (adding) {
2856 entry->ip_addr = ip_addr;
2857 entry->dev = rocker_port->dev;
2858 ether_addr_copy(entry->eth_dst, eth_dst);
2859 entry->ttl_check = true;
2860 _rocker_neigh_add(rocker, entry);
2861 } else if (removing) {
2862 memcpy(entry, found, sizeof(*entry));
2863 _rocker_neigh_del(rocker, found);
2864 } else if (updating) {
2865 _rocker_neigh_update(rocker, found, eth_dst, true);
2866 memcpy(entry, found, sizeof(*entry));
2867 } else {
2868 err = -ENOENT;
2869 }
2870
2871 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
2872
2873 if (err)
2874 goto err_out;
2875
2876 /* For each active neighbor, we have an L3 unicast group and
2877 * a /32 route to the neighbor, which uses the L3 unicast
2878 * group. The L3 unicast group can also be referred to by
2879 * other routes' nexthops.
2880 */
2881
2882 err = rocker_group_l3_unicast(rocker_port, flags,
2883 entry->index,
2884 rocker_port->dev->dev_addr,
2885 entry->eth_dst,
2886 rocker_port->internal_vlan_id,
2887 entry->ttl_check,
2888 rocker_port->pport);
2889 if (err) {
2890 netdev_err(rocker_port->dev,
2891 "Error (%d) L3 unicast group index %d\n",
2892 err, entry->index);
2893 goto err_out;
2894 }
2895
2896 if (adding || removing) {
2897 group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
2898 err = rocker_flow_tbl_ucast4_routing(rocker_port,
2899 eth_type, ip_addr,
2900 inet_make_mask(32),
2901 priority, goto_tbl,
2902 group_id, flags);
2903
2904 if (err)
2905 netdev_err(rocker_port->dev,
2906 "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
2907 err, &entry->ip_addr, group_id);
2908 }
2909
2910err_out:
2911 if (!adding)
2912 kfree(entry);
2913
2914 return err;
2915}
2916
2917static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
2918 __be32 ip_addr)
2919{
2920 struct net_device *dev = rocker_port->dev;
2921 struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
2922 int err = 0;
2923
2924 if (!n) {
2925 n = neigh_create(&arp_tbl, &ip_addr, dev);
2926 if (IS_ERR(n))
2927 return PTR_ERR(n);
2928 }
2929
2930 /* If the neigh is already resolved, then go ahead and
2931 * install the entry, otherwise start the ARP process to
2932 * resolve the neigh.
2933 */
2934
2935 if (n->nud_state & NUD_VALID)
2936 err = rocker_port_ipv4_neigh(rocker_port, 0, ip_addr, n->ha);
2937 else
2938 neigh_event_send(n, NULL);
2939
2940 neigh_release(n);
2941 return err;
2942}
2943
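/* Resolve a nexthop IP address to an L3 unicast group index, creating a
 * placeholder neigh entry and kicking off ARP resolution if the address is
 * not yet known.
 */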
2944static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
2945 __be32 ip_addr, u32 *index)
2946{
2947 struct rocker *rocker = rocker_port->rocker;
2948 struct rocker_neigh_tbl_entry *entry;
2949 struct rocker_neigh_tbl_entry *found;
2950 unsigned long lock_flags;
2951 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2952 bool updating;
2953 bool removing;
2954 bool resolved = true;
2955 int err = 0;
2956
2957 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2958 if (!entry)
2959 return -ENOMEM;
2960
2961 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
2962
2963 found = rocker_neigh_tbl_find(rocker, ip_addr);
2964 if (found)
2965 *index = found->index;
2966
2967 updating = found && adding;
2968 removing = found && !adding;
2969 adding = !found && adding;
2970
2971 if (adding) {
2972 entry->ip_addr = ip_addr;
2973 entry->dev = rocker_port->dev;
2974 _rocker_neigh_add(rocker, entry);
2975 *index = entry->index;
2976 resolved = false;
2977 } else if (removing) {
2978 _rocker_neigh_del(rocker, found);
2979 } else if (updating) {
2980 _rocker_neigh_update(rocker, found, NULL, false);
2981 resolved = !is_zero_ether_addr(found->eth_dst);
2982 } else {
2983 err = -ENOENT;
2984 }
2985
2986 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
2987
2988 if (!adding)
2989 kfree(entry);
2990
2991 if (err)
2992 return err;
2993
2994 /* Resolved means neigh ip_addr is resolved to neigh mac. */
2995
2996 if (!resolved)
2997 err = rocker_port_ipv4_resolve(rocker_port, ip_addr);
2998
2999 return err;
3000}
3001
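/* Rebuild the L2 flood group for a VLAN so that it references the L2
 * interface group of every bridged port that is a member of that VLAN.
 */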
3002static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
3003 int flags, __be16 vlan_id)
3004{
3005 struct rocker_port *p;
3006 struct rocker *rocker = rocker_port->rocker;
3007 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3008 u32 *group_ids;
3009 u8 group_count = 0;
3010 int err = 0;
3011 int i;
3012
3013 group_ids = kcalloc(rocker->port_count, sizeof(u32),
3014 rocker_op_flags_gfp(flags));
3015 if (!group_ids)
3016 return -ENOMEM;
3017
3018 /* Adjust the flood group for this VLAN. The flood group
3019 * references an L2 interface group for each port in this
3020 * VLAN.
3021 */
3022
3023 for (i = 0; i < rocker->port_count; i++) {
3024 p = rocker->ports[i];
3025 if (!rocker_port_is_bridged(p))
3026 continue;
3027 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
3028 group_ids[group_count++] =
3029 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
3030 }
3031 }
3032
3033 /* If there are no bridged ports in this VLAN, we're done */
3034 if (group_count == 0)
3035 goto no_ports_in_vlan;
3036
3037 err = rocker_group_l2_flood(rocker_port, flags, vlan_id,
3038 group_count, group_ids,
3039 group_id);
3040 if (err)
3041 netdev_err(rocker_port->dev,
3042 "Error (%d) port VLAN l2 flood group\n", err);
3043
3044no_ports_in_vlan:
3045 kfree(group_ids);
3046 return err;
3047}
3048
3049static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
3050 int flags, __be16 vlan_id,
3051 bool pop_vlan)
3052{
3053 struct rocker *rocker = rocker_port->rocker;
3054 struct rocker_port *p;
3055 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3056 u32 out_pport;
3057 int ref = 0;
3058 int err;
3059 int i;
3060
3061 /* An L2 interface group for this port in this VLAN, but
3062 * only when port STP state is LEARNING|FORWARDING.
3063 */
3064
3065 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3066 rocker_port->stp_state == BR_STATE_FORWARDING) {
3067 out_pport = rocker_port->pport;
3068 err = rocker_group_l2_interface(rocker_port, flags,
3069 vlan_id, out_pport,
3070 pop_vlan);
3071 if (err) {
3072 netdev_err(rocker_port->dev,
3073 "Error (%d) port VLAN l2 group for pport %d\n",
3074 err, out_pport);
3075 return err;
3076 }
3077 }
3078
3079 /* An L2 interface group for this VLAN to CPU port.
3080 * Add when first port joins this VLAN and destroy when
3081 * last port leaves this VLAN.
3082 */
3083
3084 for (i = 0; i < rocker->port_count; i++) {
3085 p = rocker->ports[i];
3086 if (test_bit(ntohs(vlan_id), p->vlan_bitmap))
3087 ref++;
3088 }
3089
3090 if ((!adding || ref != 1) && (adding || ref != 0))
3091 return 0;
3092
3093 out_pport = 0;
3094 err = rocker_group_l2_interface(rocker_port, flags,
3095 vlan_id, out_pport,
3096 pop_vlan);
3097 if (err) {
3098 netdev_err(rocker_port->dev,
3099 "Error (%d) port VLAN l2 group for CPU port\n", err);
3100 return err;
3101 }
3102
3103 return 0;
3104}
3105
3106static struct rocker_ctrl {
3107 const u8 *eth_dst;
3108 const u8 *eth_dst_mask;
3109 __be16 eth_type;
3110 bool acl;
3111 bool bridge;
3112 bool term;
3113 bool copy_to_cpu;
3114} rocker_ctrls[] = {
3115 [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
3116 /* pass link local multicast pkts up to CPU for filtering */
3117 .eth_dst = ll_mac,
3118 .eth_dst_mask = ll_mask,
3119 .acl = true,
3120 },
3121 [ROCKER_CTRL_LOCAL_ARP] = {
3122 /* pass local ARP pkts up to CPU */
3123 .eth_dst = zero_mac,
3124 .eth_dst_mask = zero_mac,
3125 .eth_type = htons(ETH_P_ARP),
3126 .acl = true,
3127 },
3128 [ROCKER_CTRL_IPV4_MCAST] = {
3129 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3130 .eth_dst = ipv4_mcast,
3131 .eth_dst_mask = ipv4_mask,
3132 .eth_type = htons(ETH_P_IP),
3133 .term = true,
3134 .copy_to_cpu = true,
3135 },
3136 [ROCKER_CTRL_IPV6_MCAST] = {
3137 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3138 .eth_dst = ipv6_mcast,
3139 .eth_dst_mask = ipv6_mask,
3140 .eth_type = htons(ETH_P_IPV6),
3141 .term = true,
3142 .copy_to_cpu = true,
3143 },
3144 [ROCKER_CTRL_DFLT_BRIDGING] = {
3145 /* flood any pkts on vlan */
3146 .bridge = true,
3147 .copy_to_cpu = true,
3148 },
3149};
3150
3151static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
3152 int flags, struct rocker_ctrl *ctrl,
3153 __be16 vlan_id)
3154{
3155 u32 in_pport = rocker_port->pport;
3156 u32 in_pport_mask = 0xffffffff;
3157 u32 out_pport = 0;
3158 u8 *eth_src = NULL;
3159 u8 *eth_src_mask = NULL;
3160 __be16 vlan_id_mask = htons(0xffff);
3161 u8 ip_proto = 0;
3162 u8 ip_proto_mask = 0;
3163 u8 ip_tos = 0;
3164 u8 ip_tos_mask = 0;
3165 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3166 int err;
3167
3168 err = rocker_flow_tbl_acl(rocker_port, flags,
3169 in_pport, in_pport_mask,
3170 eth_src, eth_src_mask,
3171 ctrl->eth_dst, ctrl->eth_dst_mask,
3172 ctrl->eth_type,
3173 vlan_id, vlan_id_mask,
3174 ip_proto, ip_proto_mask,
3175 ip_tos, ip_tos_mask,
3176 group_id);
3177
3178 if (err)
3179 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
3180
3181 return err;
3182}
3183
3184static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
3185 int flags, struct rocker_ctrl *ctrl,
3186 __be16 vlan_id)
3187{
3188 enum rocker_of_dpa_table_id goto_tbl =
3189 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3190 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3191 u32 tunnel_id = 0;
3192 int err;
3193
3194 if (!rocker_port_is_bridged(rocker_port))
3195 return 0;
3196
3197 err = rocker_flow_tbl_bridge(rocker_port, flags,
3198 ctrl->eth_dst, ctrl->eth_dst_mask,
3199 vlan_id, tunnel_id,
3200 goto_tbl, group_id, ctrl->copy_to_cpu);
3201
3202 if (err)
3203 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
3204
3205 return err;
3206}
3207
3208static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
3209 int flags, struct rocker_ctrl *ctrl,
3210 __be16 vlan_id)
3211{
3212 u32 in_pport_mask = 0xffffffff;
3213 __be16 vlan_id_mask = htons(0xffff);
3214 int err;
3215
3216 if (ntohs(vlan_id) == 0)
3217 vlan_id = rocker_port->internal_vlan_id;
3218
3219 err = rocker_flow_tbl_term_mac(rocker_port,
3220 rocker_port->pport, in_pport_mask,
3221 ctrl->eth_type, ctrl->eth_dst,
3222 ctrl->eth_dst_mask, vlan_id,
3223 vlan_id_mask, ctrl->copy_to_cpu,
3224 flags);
3225
3226 if (err)
3227 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
3228
3229 return err;
3230}
3231
3232static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, int flags,
3233 struct rocker_ctrl *ctrl, __be16 vlan_id)
3234{
3235 if (ctrl->acl)
3236 return rocker_port_ctrl_vlan_acl(rocker_port, flags,
3237 ctrl, vlan_id);
3238 if (ctrl->bridge)
3239 return rocker_port_ctrl_vlan_bridge(rocker_port, flags,
3240 ctrl, vlan_id);
3241
3242 if (ctrl->term)
3243 return rocker_port_ctrl_vlan_term(rocker_port, flags,
3244 ctrl, vlan_id);
3245
3246 return -EOPNOTSUPP;
3247}
3248
3249static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
3250 int flags, __be16 vlan_id)
3251{
3252 int err = 0;
3253 int i;
3254
3255 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3256 if (rocker_port->ctrls[i]) {
3257 err = rocker_port_ctrl_vlan(rocker_port, flags,
3258 &rocker_ctrls[i], vlan_id);
3259 if (err)
3260 return err;
3261 }
3262 }
3263
3264 return err;
3265}
3266
3267static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
3268 struct rocker_ctrl *ctrl)
3269{
3270 u16 vid;
3271 int err = 0;
3272
3273 for (vid = 1; vid < VLAN_N_VID; vid++) {
3274 if (!test_bit(vid, rocker_port->vlan_bitmap))
3275 continue;
3276 err = rocker_port_ctrl_vlan(rocker_port, flags,
3277 ctrl, htons(vid));
3278 if (err)
3279 break;
3280 }
3281
3282 return err;
3283}
3284
3285static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
3286 u16 vid)
3287{
3288 enum rocker_of_dpa_table_id goto_tbl =
3289 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
3290 u32 in_pport = rocker_port->pport;
3291 __be16 vlan_id = htons(vid);
3292 __be16 vlan_id_mask = htons(0xffff);
3293 __be16 internal_vlan_id;
3294 bool untagged;
3295 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3296 int err;
3297
3298 internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
3299
3300 if (adding && test_and_set_bit(ntohs(internal_vlan_id),
3301 rocker_port->vlan_bitmap))
3302 return 0; /* already added */
3303 else if (!adding && !test_and_clear_bit(ntohs(internal_vlan_id),
3304 rocker_port->vlan_bitmap))
3305 return 0; /* already removed */
3306
3307 if (adding) {
3308 err = rocker_port_ctrl_vlan_add(rocker_port, flags,
3309 internal_vlan_id);
3310 if (err) {
3311 netdev_err(rocker_port->dev,
3312 "Error (%d) port ctrl vlan add\n", err);
3313 return err;
3314 }
3315 }
3316
3317 err = rocker_port_vlan_l2_groups(rocker_port, flags,
3318 internal_vlan_id, untagged);
3319 if (err) {
3320 netdev_err(rocker_port->dev,
3321 "Error (%d) port VLAN l2 groups\n", err);
3322 return err;
3323 }
3324
3325 err = rocker_port_vlan_flood_group(rocker_port, flags,
3326 internal_vlan_id);
3327 if (err) {
3328 netdev_err(rocker_port->dev,
3329 "Error (%d) port VLAN l2 flood group\n", err);
3330 return err;
3331 }
3332
3333 err = rocker_flow_tbl_vlan(rocker_port, flags,
3334 in_pport, vlan_id, vlan_id_mask,
3335 goto_tbl, untagged, internal_vlan_id);
3336 if (err)
3337 netdev_err(rocker_port->dev,
3338 "Error (%d) port VLAN table\n", err);
3339
3340 return err;
3341}
3342
3343static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
3344{
3345 enum rocker_of_dpa_table_id goto_tbl;
3346 u32 in_pport;
3347 u32 in_pport_mask;
3348 int err;
3349
3350 /* Normal Ethernet Frames. Matches pkts from any local physical
3351 * ports. Goto VLAN tbl.
3352 */
3353
3354 in_pport = 0;
3355 in_pport_mask = 0xffff0000;
3356 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3357
3358 err = rocker_flow_tbl_ig_port(rocker_port, flags,
3359 in_pport, in_pport_mask,
3360 goto_tbl);
3361 if (err)
3362 netdev_err(rocker_port->dev,
3363 "Error (%d) ingress port table entry\n", err);
3364
3365 return err;
3366}
3367
3368struct rocker_fdb_learn_work {
3369 struct work_struct work;
3370 struct net_device *dev;
3371 int flags;
3372 u8 addr[ETH_ALEN];
3373 u16 vid;
3374};
3375
3376static void rocker_port_fdb_learn_work(struct work_struct *work)
3377{
3378 struct rocker_fdb_learn_work *lw =
3379 container_of(work, struct rocker_fdb_learn_work, work);
3380 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3381 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
3382 struct netdev_switch_notifier_fdb_info info;
3383
3384 info.addr = lw->addr;
3385 info.vid = lw->vid;
3386
3387 if (learned && removing)
3388 call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL,
3389 lw->dev, &info.info);
3390 else if (learned && !removing)
3391 call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD,
3392 lw->dev, &info.info);
3393
3394 kfree(work);
3395}
3396
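/* Program a bridging flow for a learned/static MAC and, when the port has
 * BR_LEARNING_SYNC set, defer the switchdev FDB add/del notification to a
 * work item.
 */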
3397static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
3398 int flags, const u8 *addr, __be16 vlan_id)
3399{
3400 struct rocker_fdb_learn_work *lw;
3401 enum rocker_of_dpa_table_id goto_tbl =
3402 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3403 u32 out_pport = rocker_port->pport;
3404 u32 tunnel_id = 0;
3405 u32 group_id = ROCKER_GROUP_NONE;
3406 bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
3407 bool copy_to_cpu = false;
3408 int err;
3409
3410 if (rocker_port_is_bridged(rocker_port))
3411 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3412
3413 if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
3414 err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL,
3415 vlan_id, tunnel_id, goto_tbl,
3416 group_id, copy_to_cpu);
3417 if (err)
3418 return err;
3419 }
3420
3421 if (!syncing)
3422 return 0;
3423
3424 if (!rocker_port_is_bridged(rocker_port))
3425 return 0;
3426
3427 lw = kmalloc(sizeof(*lw), rocker_op_flags_gfp(flags));
3428 if (!lw)
3429 return -ENOMEM;
3430
3431 INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3432
3433 lw->dev = rocker_port->dev;
3434 lw->flags = flags;
3435 ether_addr_copy(lw->addr, addr);
3436 lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
3437
3438 schedule_work(&lw->work);
3439
3440 return 0;
3441}
3442
3443static struct rocker_fdb_tbl_entry *
3444rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
3445{
3446 struct rocker_fdb_tbl_entry *found;
3447
3448 hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3449 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3450 return found;
3451
3452 return NULL;
3453}
3454
3455static int rocker_port_fdb(struct rocker_port *rocker_port,
3456 const unsigned char *addr,
3457 __be16 vlan_id, int flags)
3458{
3459 struct rocker *rocker = rocker_port->rocker;
3460 struct rocker_fdb_tbl_entry *fdb;
3461 struct rocker_fdb_tbl_entry *found;
3462 bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3463 unsigned long lock_flags;
3464
3465 fdb = kzalloc(sizeof(*fdb), rocker_op_flags_gfp(flags));
3466 if (!fdb)
3467 return -ENOMEM;
3468
3469 fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
3470 fdb->key.pport = rocker_port->pport;
3471 ether_addr_copy(fdb->key.addr, addr);
3472 fdb->key.vlan_id = vlan_id;
3473 fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3474
3475 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3476
3477 found = rocker_fdb_tbl_find(rocker, fdb);
3478
3479 if (removing && found) {
3480 kfree(fdb);
3481 hash_del(&found->entry);
3482 } else if (!removing && !found) {
3483 hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
3484 }
3485
3486 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3487
3488 /* Check if adding and already exists, or removing and can't find */
3489 if (!found != !removing) {
3490 kfree(fdb);
3491 if (!found && removing)
3492 return 0;
3493 /* Refreshing existing to update aging timers */
3494 flags |= ROCKER_OP_FLAG_REFRESH;
3495 }
3496
3497 return rocker_port_fdb_learn(rocker_port, flags, addr, vlan_id);
3498}
3499
3500static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
3501{
3502 struct rocker *rocker = rocker_port->rocker;
3503 struct rocker_fdb_tbl_entry *found;
3504 unsigned long lock_flags;
3505 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
3506 struct hlist_node *tmp;
3507 int bkt;
3508 int err = 0;
3509
3510 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3511 rocker_port->stp_state == BR_STATE_FORWARDING)
3512 return 0;
3513
3514 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3515
3516 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
3517 if (found->key.pport != rocker_port->pport)
3518 continue;
3519 if (!found->learned)
3520 continue;
3521 err = rocker_port_fdb_learn(rocker_port, flags,
3522 found->key.addr,
3523 found->key.vlan_id);
3524 if (err)
3525 goto err_out;
3526 hash_del(&found->entry);
3527 }
3528
3529err_out:
3530 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3531
3532 return err;
3533}
3534
3535static int rocker_port_router_mac(struct rocker_port *rocker_port,
3536 int flags, __be16 vlan_id)
3537{
3538 u32 in_pport_mask = 0xffffffff;
3539 __be16 eth_type;
3540 const u8 *dst_mac_mask = ff_mac;
3541 __be16 vlan_id_mask = htons(0xffff);
3542 bool copy_to_cpu = false;
3543 int err;
3544
3545 if (ntohs(vlan_id) == 0)
3546 vlan_id = rocker_port->internal_vlan_id;
3547
3548 eth_type = htons(ETH_P_IP);
3549 err = rocker_flow_tbl_term_mac(rocker_port,
3550 rocker_port->pport, in_pport_mask,
3551 eth_type, rocker_port->dev->dev_addr,
3552 dst_mac_mask, vlan_id, vlan_id_mask,
3553 copy_to_cpu, flags);
3554 if (err)
3555 return err;
3556
3557 eth_type = htons(ETH_P_IPV6);
3558 err = rocker_flow_tbl_term_mac(rocker_port,
3559 rocker_port->pport, in_pport_mask,
3560 eth_type, rocker_port->dev->dev_addr,
3561 dst_mac_mask, vlan_id, vlan_id_mask,
3562 copy_to_cpu, flags);
3563
3564 return err;
3565}
3566
3567static int rocker_port_fwding(struct rocker_port *rocker_port)
3568{
3569 bool pop_vlan;
3570 u32 out_pport;
3571 __be16 vlan_id;
3572 u16 vid;
3573 int flags = ROCKER_OP_FLAG_NOWAIT;
3574 int err;
3575
3576 /* Port will be forwarding-enabled if its STP state is LEARNING
3577 * or FORWARDING. Traffic from CPU can still egress, regardless of
3578 * port STP state. Use L2 interface group on port VLANs as a way
3579 * to toggle port forwarding: if forwarding is disabled, L2
3580 * interface group will not exist.
3581 */
3582
3583 if (rocker_port->stp_state != BR_STATE_LEARNING &&
3584 rocker_port->stp_state != BR_STATE_FORWARDING)
3585 flags |= ROCKER_OP_FLAG_REMOVE;
3586
3587 out_pport = rocker_port->pport;
3588 for (vid = 1; vid < VLAN_N_VID; vid++) {
3589 if (!test_bit(vid, rocker_port->vlan_bitmap))
3590 continue;
3591 vlan_id = htons(vid);
3592 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
3593 err = rocker_group_l2_interface(rocker_port, flags,
3594 vlan_id, out_pport,
3595 pop_vlan);
3596 if (err) {
3597 netdev_err(rocker_port->dev,
3598 "Error (%d) port VLAN l2 group for pport %d\n",
3599 err, out_pport);
3600 return err;
3601 }
3602 }
3603
3604 return 0;
3605}
3606
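/* Apply an STP state change: adjust which control-traffic entries the port
 * wants, flush learned FDB entries unless the new state is LEARNING or
 * FORWARDING, and toggle forwarding via the port's L2 interface groups.
 */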
3607static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
3608{
3609 bool want[ROCKER_CTRL_MAX] = { 0, };
3610 int flags;
3611 int err;
3612 int i;
3613
3614 if (rocker_port->stp_state == state)
3615 return 0;
3616
3617 rocker_port->stp_state = state;
3618
3619 switch (state) {
3620 case BR_STATE_DISABLED:
3621 /* port is completely disabled */
3622 break;
3623 case BR_STATE_LISTENING:
3624 case BR_STATE_BLOCKING:
3625 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3626 break;
3627 case BR_STATE_LEARNING:
3628 case BR_STATE_FORWARDING:
3629 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3630 want[ROCKER_CTRL_IPV4_MCAST] = true;
3631 want[ROCKER_CTRL_IPV6_MCAST] = true;
3632 if (rocker_port_is_bridged(rocker_port))
3633 want[ROCKER_CTRL_DFLT_BRIDGING] = true;
3634 else
3635 want[ROCKER_CTRL_LOCAL_ARP] = true;
3636 break;
3637 }
3638
3639 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3640 if (want[i] != rocker_port->ctrls[i]) {
3641 flags = ROCKER_OP_FLAG_NOWAIT |
3642 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
3643 err = rocker_port_ctrl(rocker_port, flags,
3644 &rocker_ctrls[i]);
3645 if (err)
3646 return err;
3647 rocker_port->ctrls[i] = want[i];
3648 }
3649 }
3650
3651 err = rocker_port_fdb_flush(rocker_port);
3652 if (err)
3653 return err;
3654
3655 return rocker_port_fwding(rocker_port);
3656}
3657
3658static int rocker_port_fwd_enable(struct rocker_port *rocker_port)
3659{
3660 if (rocker_port_is_bridged(rocker_port))
3661 /* bridge STP will enable port */
3662 return 0;
3663
3664 /* port is not bridged, so simulate going to FORWARDING state */
3665 return rocker_port_stp_update(rocker_port, BR_STATE_FORWARDING);
3666}
3667
3668static int rocker_port_fwd_disable(struct rocker_port *rocker_port)
3669{
3670 if (rocker_port_is_bridged(rocker_port))
3671 /* bridge STP will disable port */
3672 return 0;
3673
3674 /* port is not bridged, so simulate going to DISABLED state */
3675 return rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
3676}
3677
3678static struct rocker_internal_vlan_tbl_entry *
3679rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex)
3680{
3681 struct rocker_internal_vlan_tbl_entry *found;
3682
3683 hash_for_each_possible(rocker->internal_vlan_tbl, found,
3684 entry, ifindex) {
3685 if (found->ifindex == ifindex)
3686 return found;
3687 }
3688
3689 return NULL;
3690}
3691
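/* Internal VLAN IDs are allocated per ifindex from a fixed pool starting at
 * ROCKER_INTERNAL_VLAN_ID_BASE and are reference counted across users.
 */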
3692static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
3693 int ifindex)
3694{
3695 struct rocker *rocker = rocker_port->rocker;
3696 struct rocker_internal_vlan_tbl_entry *entry;
3697 struct rocker_internal_vlan_tbl_entry *found;
3698 unsigned long lock_flags;
3699 int i;
3700
3701 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3702 if (!entry)
3703 return 0;
3704
3705 entry->ifindex = ifindex;
3706
3707 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3708
3709 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3710 if (found) {
3711 kfree(entry);
3712 goto found;
3713 }
3714
3715 found = entry;
3716 hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
3717
3718 for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
3719 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
3720 continue;
3721 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
3722 goto found;
3723 }
3724
3725 netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
3726
3727found:
3728 found->ref_count++;
3729 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3730
3731 return found->vlan_id;
3732}
3733
3734static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port,
3735 int ifindex)
3736{
3737 struct rocker *rocker = rocker_port->rocker;
3738 struct rocker_internal_vlan_tbl_entry *found;
3739 unsigned long lock_flags;
3740 unsigned long bit;
3741
3742 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3743
3744 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3745 if (!found) {
3746 netdev_err(rocker_port->dev,
3747 "ifindex (%d) not found in internal VLAN tbl\n",
3748 ifindex);
3749 goto not_found;
3750 }
3751
3752 if (--found->ref_count <= 0) {
3753 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
3754 clear_bit(bit, rocker->internal_vlan_bitmap);
3755 hash_del(&found->entry);
3756 kfree(found);
3757 }
3758
3759not_found:
3760 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3761}
3762
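/* Program an IPv4 route into the unicast routing table. If the single
 * next hop is a gateway reachable via this port, point the route at an
 * L3 unicast group; otherwise trap matching packets to the CPU through
 * the port's internal-VLAN L2 interface group. ECMP is not supported.
 */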
3763static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
3764 int dst_len, struct fib_info *fi, u32 tb_id,
3765 int flags)
3766{
3767 struct fib_nh *nh;
3768 __be16 eth_type = htons(ETH_P_IP);
3769 __be32 dst_mask = inet_make_mask(dst_len);
3770 __be16 internal_vlan_id = rocker_port->internal_vlan_id;
3771 u32 priority = fi->fib_priority;
3772 enum rocker_of_dpa_table_id goto_tbl =
3773 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3774 u32 group_id;
3775 bool nh_on_port;
3776 bool has_gw;
3777 u32 index;
3778 int err;
3779
3780 /* XXX support ECMP */
3781
3782 nh = fi->fib_nh;
3783 nh_on_port = (fi->fib_dev == rocker_port->dev);
3784 has_gw = !!nh->nh_gw;
3785
3786 if (has_gw && nh_on_port) {
3787 err = rocker_port_ipv4_nh(rocker_port, flags,
3788 nh->nh_gw, &index);
3789 if (err)
3790 return err;
3791
3792 group_id = ROCKER_GROUP_L3_UNICAST(index);
3793 } else {
3794 /* Send to CPU for processing */
3795 group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
3796 }
3797
3798 err = rocker_flow_tbl_ucast4_routing(rocker_port, eth_type, dst,
3799 dst_mask, priority, goto_tbl,
3800 group_id, flags);
3801 if (err)
3802 netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
3803 err, &dst);
3804
3805 return err;
3806}
3807
3808/*****************
3809 * Net device ops
3810 *****************/
3811
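/* ndo_open: set up the port's TX/RX DMA rings, request the per-port
 * MSI-X vectors, enable forwarding (a simulated FORWARDING state when
 * the port is not bridged), then enable NAPI and the TX queue.
 */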
3812static int rocker_port_open(struct net_device *dev)
3813{
3814 struct rocker_port *rocker_port = netdev_priv(dev);
3815 int err;
3816
3817 err = rocker_port_dma_rings_init(rocker_port);
3818 if (err)
3819 return err;
3820
3821 err = request_irq(rocker_msix_tx_vector(rocker_port),
3822 rocker_tx_irq_handler, 0,
3823 rocker_driver_name, rocker_port);
3824 if (err) {
3825 netdev_err(rocker_port->dev, "cannot assign tx irq\n");
3826 goto err_request_tx_irq;
3827 }
3828
3829 err = request_irq(rocker_msix_rx_vector(rocker_port),
3830 rocker_rx_irq_handler, 0,
3831 rocker_driver_name, rocker_port);
3832 if (err) {
3833 netdev_err(rocker_port->dev, "cannot assign rx irq\n");
3834 goto err_request_rx_irq;
3835 }
3836
e47172ab 3837 err = rocker_port_fwd_enable(rocker_port);
6c707945 3838 if (err)
e47172ab 3839 goto err_fwd_enable;
6c707945 3840
3841 napi_enable(&rocker_port->napi_tx);
3842 napi_enable(&rocker_port->napi_rx);
3843 rocker_port_set_enable(rocker_port, true);
3844 netif_start_queue(dev);
3845 return 0;
3846
e47172ab 3847err_fwd_enable:
6c707945 3848 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
3849err_request_rx_irq:
3850 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
3851err_request_tx_irq:
3852 rocker_port_dma_rings_fini(rocker_port);
3853 return err;
3854}
3855
3856static int rocker_port_stop(struct net_device *dev)
3857{
3858 struct rocker_port *rocker_port = netdev_priv(dev);
3859
3860 netif_stop_queue(dev);
3861 rocker_port_set_enable(rocker_port, false);
3862 napi_disable(&rocker_port->napi_rx);
3863 napi_disable(&rocker_port->napi_tx);
e47172ab 3864 rocker_port_fwd_disable(rocker_port);
3865 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
3866 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
3867 rocker_port_dma_rings_fini(rocker_port);
3868
3869 return 0;
3870}
3871
3872static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
3873 struct rocker_desc_info *desc_info)
3874{
3875 struct rocker *rocker = rocker_port->rocker;
3876 struct pci_dev *pdev = rocker->pdev;
3877 struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
3878 struct rocker_tlv *attr;
3879 int rem;
3880
3881 rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
3882 if (!attrs[ROCKER_TLV_TX_FRAGS])
3883 return;
3884 rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
3885 struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
3886 dma_addr_t dma_handle;
3887 size_t len;
3888
3889 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
3890 continue;
3891 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
3892 attr);
3893 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
3894 !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
3895 continue;
3896 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
3897 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
3898 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
3899 }
3900}
3901
3902static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port,
3903 struct rocker_desc_info *desc_info,
3904 char *buf, size_t buf_len)
3905{
3906 struct rocker *rocker = rocker_port->rocker;
3907 struct pci_dev *pdev = rocker->pdev;
3908 dma_addr_t dma_handle;
3909 struct rocker_tlv *frag;
3910
3911 dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
3912 if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
3913 if (net_ratelimit())
3914 netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
3915 return -EIO;
3916 }
3917 frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
3918 if (!frag)
3919 goto unmap_frag;
3920 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
3921 dma_handle))
3922 goto nest_cancel;
3923 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
3924 buf_len))
3925 goto nest_cancel;
3926 rocker_tlv_nest_end(desc_info, frag);
3927 return 0;
3928
3929nest_cancel:
3930 rocker_tlv_nest_cancel(desc_info, frag);
3931unmap_frag:
3932 pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
3933 return -EMSGSIZE;
3934}
3935
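/* ndo_start_xmit: DMA-map the skb head and each page fragment and
 * describe them as nested TX_FRAG TLVs in a TX descriptor, then post
 * the descriptor. The queue is stopped once no further descriptors are
 * available; on error the skb is dropped and tx_dropped is bumped.
 */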
3936static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
3937{
3938 struct rocker_port *rocker_port = netdev_priv(dev);
3939 struct rocker *rocker = rocker_port->rocker;
3940 struct rocker_desc_info *desc_info;
3941 struct rocker_tlv *frags;
3942 int i;
3943 int err;
3944
3945 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
3946 if (unlikely(!desc_info)) {
3947 if (net_ratelimit())
3948 netdev_err(dev, "tx ring full when queue awake\n");
3949 return NETDEV_TX_BUSY;
3950 }
3951
3952 rocker_desc_cookie_ptr_set(desc_info, skb);
3953
3954 frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
3955 if (!frags)
3956 goto out;
3957 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
3958 skb->data, skb_headlen(skb));
3959 if (err)
3960 goto nest_cancel;
3961 if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX)
3962 goto nest_cancel;
3963
3964 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3965 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3966
3967 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
3968 skb_frag_address(frag),
3969 skb_frag_size(frag));
3970 if (err)
3971 goto unmap_frags;
3972 }
3973 rocker_tlv_nest_end(desc_info, frags);
3974
3975 rocker_desc_gen_clear(desc_info);
3976 rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
3977
3978 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
3979 if (!desc_info)
3980 netif_stop_queue(dev);
3981
3982 return NETDEV_TX_OK;
3983
3984unmap_frags:
3985 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
3986nest_cancel:
3987 rocker_tlv_nest_cancel(desc_info, frags);
3988out:
3989 dev_kfree_skb(skb);
3990 dev->stats.tx_dropped++;
3991
3992 return NETDEV_TX_OK;
3993}
3994
3995static int rocker_port_set_mac_address(struct net_device *dev, void *p)
3996{
3997 struct sockaddr *addr = p;
3998 struct rocker_port *rocker_port = netdev_priv(dev);
3999 int err;
4000
4001 if (!is_valid_ether_addr(addr->sa_data))
4002 return -EADDRNOTAVAIL;
4003
4004 err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
4005 if (err)
4006 return err;
4007 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4008 return 0;
4009}
4010
4011static int rocker_port_vlan_rx_add_vid(struct net_device *dev,
4012 __be16 proto, u16 vid)
4013{
4014 struct rocker_port *rocker_port = netdev_priv(dev);
4015 int err;
4016
4017 err = rocker_port_vlan(rocker_port, 0, vid);
4018 if (err)
4019 return err;
4020
4021 return rocker_port_router_mac(rocker_port, 0, htons(vid));
4022}
4023
4024static int rocker_port_vlan_rx_kill_vid(struct net_device *dev,
4025 __be16 proto, u16 vid)
4026{
4027 struct rocker_port *rocker_port = netdev_priv(dev);
4028 int err;
4029
4030 err = rocker_port_router_mac(rocker_port, ROCKER_OP_FLAG_REMOVE,
4031 htons(vid));
4032 if (err)
4033 return err;
4034
4035 return rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, vid);
4036}
4037
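/* ndo_fdb_add/ndo_fdb_del: program or remove an FDB entry for a bridged
 * port, mapping the VID to the port's VLAN ID first; non-bridged ports
 * return -EINVAL.
 */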
4038static int rocker_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
4039 struct net_device *dev,
4040 const unsigned char *addr, u16 vid,
4041 u16 nlm_flags)
4042{
4043 struct rocker_port *rocker_port = netdev_priv(dev);
4044 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
4045 int flags = 0;
4046
4047 if (!rocker_port_is_bridged(rocker_port))
4048 return -EINVAL;
4049
4050 return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
4051}
4052
4053static int rocker_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
4054 struct net_device *dev,
4055 const unsigned char *addr, u16 vid)
4056{
4057 struct rocker_port *rocker_port = netdev_priv(dev);
4058 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
4059 int flags = ROCKER_OP_FLAG_REMOVE;
4060
4061 if (!rocker_port_is_bridged(rocker_port))
4062 return -EINVAL;
4063
4064 return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
4065}
4066
4067static int rocker_fdb_fill_info(struct sk_buff *skb,
4068 struct rocker_port *rocker_port,
4069 const unsigned char *addr, u16 vid,
4070 u32 portid, u32 seq, int type,
4071 unsigned int flags)
4072{
4073 struct nlmsghdr *nlh;
4074 struct ndmsg *ndm;
4075
4076 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
4077 if (!nlh)
4078 return -EMSGSIZE;
4079
4080 ndm = nlmsg_data(nlh);
4081 ndm->ndm_family = AF_BRIDGE;
4082 ndm->ndm_pad1 = 0;
4083 ndm->ndm_pad2 = 0;
4084 ndm->ndm_flags = NTF_SELF;
4085 ndm->ndm_type = 0;
4086 ndm->ndm_ifindex = rocker_port->dev->ifindex;
4087 ndm->ndm_state = NUD_REACHABLE;
4088
4089 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
4090 goto nla_put_failure;
4091
4092 if (vid && nla_put_u16(skb, NDA_VLAN, vid))
4093 goto nla_put_failure;
4094
4095 nlmsg_end(skb, nlh);
4096 return 0;
4097
4098nla_put_failure:
4099 nlmsg_cancel(skb, nlh);
4100 return -EMSGSIZE;
4101}
4102
4103static int rocker_port_fdb_dump(struct sk_buff *skb,
4104 struct netlink_callback *cb,
4105 struct net_device *dev,
4106 struct net_device *filter_dev,
4107 int idx)
4108{
4109 struct rocker_port *rocker_port = netdev_priv(dev);
4110 struct rocker *rocker = rocker_port->rocker;
4111 struct rocker_fdb_tbl_entry *found;
4112 struct hlist_node *tmp;
4113 int bkt;
4114 unsigned long lock_flags;
4115 const unsigned char *addr;
4116 u16 vid;
4117 int err;
4118
4119 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
4120 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
4a6bb6d3 4121 if (found->key.pport != rocker_port->pport)
4122 continue;
4123 if (idx < cb->args[0])
4124 goto skip;
4125 addr = found->key.addr;
4126 vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id);
4127 err = rocker_fdb_fill_info(skb, rocker_port, addr, vid,
4128 NETLINK_CB(cb->skb).portid,
4129 cb->nlh->nlmsg_seq,
4130 RTM_NEWNEIGH, NLM_F_MULTI);
4131 if (err < 0)
4132 break;
4133skip:
4134 ++idx;
4135 }
4136 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
4137 return idx;
4138}
4139
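/* ndo_bridge_setlink: parse the IFLA_PROTINFO bridge port attributes
 * and update BR_LEARNING / BR_LEARNING_SYNC in brport_flags; hardware
 * MAC learning is reprogrammed via rocker_port_set_learning().
 */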
5111f80c 4140static int rocker_port_bridge_setlink(struct net_device *dev,
add511b3 4141 struct nlmsghdr *nlh, u16 flags)
4142{
4143 struct rocker_port *rocker_port = netdev_priv(dev);
4144 struct nlattr *protinfo;
5111f80c 4145 struct nlattr *attr;
4146 int err;
4147
4148 protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
4149 IFLA_PROTINFO);
4150 if (protinfo) {
4151 attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING);
4152 if (attr) {
4153 if (nla_len(attr) < sizeof(u8))
4154 return -EINVAL;
4155
4156 if (nla_get_u8(attr))
4157 rocker_port->brport_flags |= BR_LEARNING;
4158 else
4159 rocker_port->brport_flags &= ~BR_LEARNING;
4160 err = rocker_port_set_learning(rocker_port);
4161 if (err)
4162 return err;
4163 }
4164 attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING_SYNC);
4165 if (attr) {
4166 if (nla_len(attr) < sizeof(u8))
4167 return -EINVAL;
4168
4169 if (nla_get_u8(attr))
4170 rocker_port->brport_flags |= BR_LEARNING_SYNC;
4171 else
4172 rocker_port->brport_flags &= ~BR_LEARNING_SYNC;
4173 }
4174 }
4175
4176 return 0;
4177}
4178
4179static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4180 struct net_device *dev,
46c264da 4181 u32 filter_mask, int nlflags)
4182{
4183 struct rocker_port *rocker_port = netdev_priv(dev);
1d460b98 4184 u16 mode = BRIDGE_MODE_UNDEF;
4185 u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
4186
4187 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
4188 rocker_port->brport_flags, mask,
4189 nlflags);
4190}
4191
4192static int rocker_port_get_phys_port_name(struct net_device *dev,
4193 char *buf, size_t len)
4194{
4195 struct rocker_port *rocker_port = netdev_priv(dev);
4196 struct port_name name = { .buf = buf, .len = len };
4197 int err;
4198
4199 err = rocker_cmd_exec(rocker_port->rocker, rocker_port,
4200 rocker_cmd_get_port_settings_prep, NULL,
4201 rocker_cmd_get_port_settings_phys_name_proc,
4202 &name, false);
4203
4204 return err ? -EOPNOTSUPP : 0;
4205}
4206
4207static const struct net_device_ops rocker_port_netdev_ops = {
4208 .ndo_open = rocker_port_open,
4209 .ndo_stop = rocker_port_stop,
4210 .ndo_start_xmit = rocker_port_xmit,
4211 .ndo_set_mac_address = rocker_port_set_mac_address,
4212 .ndo_vlan_rx_add_vid = rocker_port_vlan_rx_add_vid,
4213 .ndo_vlan_rx_kill_vid = rocker_port_vlan_rx_kill_vid,
4214 .ndo_fdb_add = rocker_port_fdb_add,
4215 .ndo_fdb_del = rocker_port_fdb_del,
4216 .ndo_fdb_dump = rocker_port_fdb_dump,
4217 .ndo_bridge_setlink = rocker_port_bridge_setlink,
4218 .ndo_bridge_getlink = rocker_port_bridge_getlink,
db19170b 4219 .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
4220};
4221
4222/********************
4223 * swdev interface
4224 ********************/
4225
4226static int rocker_port_swdev_parent_id_get(struct net_device *dev,
4227 struct netdev_phys_item_id *psid)
4228{
4229 struct rocker_port *rocker_port = netdev_priv(dev);
4230 struct rocker *rocker = rocker_port->rocker;
4231
4232 psid->id_len = sizeof(rocker->hw.id);
4233 memcpy(&psid->id, &rocker->hw.id, psid->id_len);
4234 return 0;
4235}
4236
98237d43 4237static int rocker_port_swdev_port_stp_update(struct net_device *dev, u8 state)
4238{
4239 struct rocker_port *rocker_port = netdev_priv(dev);
4240
4241 return rocker_port_stp_update(rocker_port, state);
4242}
4243
4244static int rocker_port_swdev_fib_ipv4_add(struct net_device *dev,
4245 __be32 dst, int dst_len,
4246 struct fib_info *fi,
4247 u8 tos, u8 type,
4248 u32 nlflags, u32 tb_id)
4249{
4250 struct rocker_port *rocker_port = netdev_priv(dev);
4251 int flags = 0;
4252
4253 return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
4254 fi, tb_id, flags);
4255}
4256
4257static int rocker_port_swdev_fib_ipv4_del(struct net_device *dev,
4258 __be32 dst, int dst_len,
4259 struct fib_info *fi,
4260 u8 tos, u8 type, u32 tb_id)
4261{
4262 struct rocker_port *rocker_port = netdev_priv(dev);
4263 int flags = ROCKER_OP_FLAG_REMOVE;
4264
4265 return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
4266 fi, tb_id, flags);
4267}
4268
4269static const struct swdev_ops rocker_port_swdev_ops = {
4270 .swdev_parent_id_get = rocker_port_swdev_parent_id_get,
4271 .swdev_port_stp_update = rocker_port_swdev_port_stp_update,
4272 .swdev_fib_ipv4_add = rocker_port_swdev_fib_ipv4_add,
4273 .swdev_fib_ipv4_del = rocker_port_swdev_fib_ipv4_del,
4274};
4275
4276/********************
4277 * ethtool interface
4278 ********************/
4279
4280static int rocker_port_get_settings(struct net_device *dev,
4281 struct ethtool_cmd *ecmd)
4282{
4283 struct rocker_port *rocker_port = netdev_priv(dev);
4284
4285 return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
4286}
4287
4288static int rocker_port_set_settings(struct net_device *dev,
4289 struct ethtool_cmd *ecmd)
4290{
4291 struct rocker_port *rocker_port = netdev_priv(dev);
4292
4293 return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
4294}
4295
4296static void rocker_port_get_drvinfo(struct net_device *dev,
4297 struct ethtool_drvinfo *drvinfo)
4298{
4299 strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
4300 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
4301}
4302
4303static struct rocker_port_stats {
4304 char str[ETH_GSTRING_LEN];
4305 int type;
4306} rocker_port_stats[] = {
4307 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
4308 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
4309 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
4310 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
4311
4312 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
4313 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
4314 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
4315 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
4316};
4317
4318#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
4319
4320static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
4321 u8 *data)
4322{
4323 u8 *p = data;
4324 int i;
4325
4326 switch (stringset) {
4327 case ETH_SS_STATS:
4328 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4329 memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
4330 p += ETH_GSTRING_LEN;
4331 }
4332 break;
4333 }
4334}
4335
4336static int
4337rocker_cmd_get_port_stats_prep(struct rocker *rocker,
4338 struct rocker_port *rocker_port,
4339 struct rocker_desc_info *desc_info,
4340 void *priv)
4341{
4342 struct rocker_tlv *cmd_stats;
4343
4344 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
4345 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
4346 return -EMSGSIZE;
4347
4348 cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
4349 if (!cmd_stats)
4350 return -EMSGSIZE;
4351
4352 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
4353 rocker_port->pport))
4354 return -EMSGSIZE;
4355
4356 rocker_tlv_nest_end(desc_info, cmd_stats);
4357
4358 return 0;
4359}
4360
4361static int
4362rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
4363 struct rocker_port *rocker_port,
4364 struct rocker_desc_info *desc_info,
4365 void *priv)
4366{
4367 struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
4368 struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
4369 struct rocker_tlv *pattr;
4a6bb6d3 4370 u32 pport;
4371 u64 *data = priv;
4372 int i;
4373
4374 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
4375
4376 if (!attrs[ROCKER_TLV_CMD_INFO])
4377 return -EIO;
4378
4379 rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
4380 attrs[ROCKER_TLV_CMD_INFO]);
4381
4a6bb6d3 4382 if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
4383 return -EIO;
4384
4385 pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
4386 if (pport != rocker_port->pport)
4387 return -EIO;
4388
4389 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4390 pattr = stats_attrs[rocker_port_stats[i].type];
4391 if (!pattr)
4392 continue;
4393
4394 data[i] = rocker_tlv_get_u64(pattr);
4395 }
4396
4397 return 0;
4398}
4399
4400static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
4401 void *priv)
4402{
4403 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
4404 rocker_cmd_get_port_stats_prep, NULL,
4405 rocker_cmd_get_port_stats_ethtool_proc,
4406 priv, false);
4407}
4408
4409static void rocker_port_get_stats(struct net_device *dev,
4410 struct ethtool_stats *stats, u64 *data)
4411{
4412 struct rocker_port *rocker_port = netdev_priv(dev);
4413
4414 if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
4415 int i;
4416
4417 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
4418 data[i] = 0;
4419 }
4420
4421 return;
4422}
4423
4424static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
4425{
4426 switch (sset) {
4427 case ETH_SS_STATS:
4428 return ROCKER_PORT_STATS_LEN;
4429 default:
4430 return -EOPNOTSUPP;
4431 }
4432}
4433
4434static const struct ethtool_ops rocker_port_ethtool_ops = {
4435 .get_settings = rocker_port_get_settings,
4436 .set_settings = rocker_port_set_settings,
4437 .get_drvinfo = rocker_port_get_drvinfo,
4438 .get_link = ethtool_op_get_link,
4439 .get_strings = rocker_port_get_strings,
4440 .get_ethtool_stats = rocker_port_get_stats,
4441 .get_sset_count = rocker_port_get_sset_count,
4442};
4443
4444/*****************
4445 * NAPI interface
4446 *****************/
4447
4448static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
4449{
4450 return container_of(napi, struct rocker_port, napi_tx);
4451}
4452
4453static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
4454{
4455 struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
4456 struct rocker *rocker = rocker_port->rocker;
4457 struct rocker_desc_info *desc_info;
4458 u32 credits = 0;
4459 int err;
4460
4461 /* Cleanup tx descriptors */
4462 while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
4463 struct sk_buff *skb;
4464
4465 err = rocker_desc_err(desc_info);
4466 if (err && net_ratelimit())
4467 netdev_err(rocker_port->dev, "tx desc received with err %d\n",
4468 err);
4469 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4470
4471 skb = rocker_desc_cookie_ptr_get(desc_info);
4472 if (err == 0) {
4473 rocker_port->dev->stats.tx_packets++;
4474 rocker_port->dev->stats.tx_bytes += skb->len;
4475 } else
4476 rocker_port->dev->stats.tx_errors++;
4477
4478 dev_kfree_skb_any(skb);
4479 credits++;
4480 }
4481
4482 if (credits && netif_queue_stopped(rocker_port->dev))
4483 netif_wake_queue(rocker_port->dev);
4484
4485 napi_complete(napi);
4486 rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
4487
4488 return 0;
4489}
4490
4491static int rocker_port_rx_proc(struct rocker *rocker,
4492 struct rocker_port *rocker_port,
4493 struct rocker_desc_info *desc_info)
4494{
4495 struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
4496 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
4497 size_t rx_len;
4498
4499 if (!skb)
4500 return -ENOENT;
4501
4502 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
4503 if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
4504 return -EINVAL;
4505
4506 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
4507
4508 rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
4509 skb_put(skb, rx_len);
4510 skb->protocol = eth_type_trans(skb, rocker_port->dev);
4511
4512 rocker_port->dev->stats.rx_packets++;
4513 rocker_port->dev->stats.rx_bytes += skb->len;
4514
4515 netif_receive_skb(skb);
4516
4517 return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
4518}
4519
4520static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
4521{
4522 return container_of(napi, struct rocker_port, napi_rx);
4523}
4524
4525static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
4526{
4527 struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
4528 struct rocker *rocker = rocker_port->rocker;
4529 struct rocker_desc_info *desc_info;
4530 u32 credits = 0;
4531 int err;
4532
4533 /* Process rx descriptors */
4534 while (credits < budget &&
4535 (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
4536 err = rocker_desc_err(desc_info);
4537 if (err) {
4538 if (net_ratelimit())
4539 netdev_err(rocker_port->dev, "rx desc received with err %d\n",
4540 err);
4541 } else {
4542 err = rocker_port_rx_proc(rocker, rocker_port,
4543 desc_info);
4544 if (err && net_ratelimit())
4545 netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
4546 err);
4547 }
4548 if (err)
4549 rocker_port->dev->stats.rx_errors++;
4550
4551 rocker_desc_gen_clear(desc_info);
4552 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
4553 credits++;
4554 }
4555
4556 if (credits < budget)
4557 napi_complete(napi);
4558
4559 rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
4560
4561 return credits;
4562}
4563
4564/*****************
4565 * PCI driver ops
4566 *****************/
4567
4568static void rocker_carrier_init(struct rocker_port *rocker_port)
4569{
4570 struct rocker *rocker = rocker_port->rocker;
4571 u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
4572 bool link_up;
4573
4a6bb6d3 4574 link_up = link_status & (1 << rocker_port->pport);
4575 if (link_up)
4576 netif_carrier_on(rocker_port->dev);
4577 else
4578 netif_carrier_off(rocker_port->dev);
4579}
4580
4581static void rocker_remove_ports(struct rocker *rocker)
4582{
9f6bbf7c 4583 struct rocker_port *rocker_port;
4584 int i;
4585
4586 for (i = 0; i < rocker->port_count; i++) {
4587 rocker_port = rocker->ports[i];
4588 rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE);
4589 unregister_netdev(rocker_port->dev);
4590 }
4591 kfree(rocker->ports);
4592}
4593
4594static void rocker_port_dev_addr_init(struct rocker *rocker,
4595 struct rocker_port *rocker_port)
4596{
4597 struct pci_dev *pdev = rocker->pdev;
4598 int err;
4599
4600 err = rocker_cmd_get_port_settings_macaddr(rocker_port,
4601 rocker_port->dev->dev_addr);
4602 if (err) {
4603 dev_warn(&pdev->dev, "failed to get mac address, using random\n");
4604 eth_hw_addr_random(rocker_port->dev);
4605 }
4606}
4607
4608static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
4609{
4610 struct pci_dev *pdev = rocker->pdev;
4611 struct rocker_port *rocker_port;
4612 struct net_device *dev;
4613 int err;
4614
4615 dev = alloc_etherdev(sizeof(struct rocker_port));
4616 if (!dev)
4617 return -ENOMEM;
4618 rocker_port = netdev_priv(dev);
4619 rocker_port->dev = dev;
4620 rocker_port->rocker = rocker;
4621 rocker_port->port_number = port_number;
4a6bb6d3 4622 rocker_port->pport = port_number + 1;
5111f80c 4623 rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
4624
4625 rocker_port_dev_addr_init(rocker, rocker_port);
4626 dev->netdev_ops = &rocker_port_netdev_ops;
4627 dev->ethtool_ops = &rocker_port_ethtool_ops;
98237d43 4628 dev->swdev_ops = &rocker_port_swdev_ops;
4629 netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
4630 NAPI_POLL_WEIGHT);
4631 netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
4632 NAPI_POLL_WEIGHT);
4633 rocker_carrier_init(rocker_port);
4634
4635 dev->features |= NETIF_F_NETNS_LOCAL |
4636 NETIF_F_HW_VLAN_CTAG_FILTER |
4637 NETIF_F_HW_SWITCH_OFFLOAD;
4638
4639 err = register_netdev(dev);
4640 if (err) {
4641 dev_err(&pdev->dev, "register_netdev failed\n");
4642 goto err_register_netdev;
4643 }
4644 rocker->ports[port_number] = rocker_port;
4645
4646 rocker_port_set_learning(rocker_port);
4647
4648 rocker_port->internal_vlan_id =
4649 rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
4650 err = rocker_port_ig_tbl(rocker_port, 0);
4651 if (err) {
4652 dev_err(&pdev->dev, "install ig port table failed\n");
4653 goto err_port_ig_tbl;
4654 }
4655
4656 return 0;
4657
4658err_port_ig_tbl:
4659 unregister_netdev(dev);
4660err_register_netdev:
4661 free_netdev(dev);
4662 return err;
4663}
4664
4665static int rocker_probe_ports(struct rocker *rocker)
4666{
4667 int i;
4668 size_t alloc_size;
4669 int err;
4670
4671 alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
4672 rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
4673 if (!rocker->ports)
4674 return -ENOMEM;
4675 for (i = 0; i < rocker->port_count; i++) {
4676 err = rocker_probe_port(rocker, i);
4677 if (err)
4678 goto remove_ports;
4679 }
4680 return 0;
4681
4682remove_ports:
4683 rocker_remove_ports(rocker);
4684 return err;
4685}
4686
4687static int rocker_msix_init(struct rocker *rocker)
4688{
4689 struct pci_dev *pdev = rocker->pdev;
4690 int msix_entries;
4691 int i;
4692 int err;
4693
4694 msix_entries = pci_msix_vec_count(pdev);
4695 if (msix_entries < 0)
4696 return msix_entries;
4697
4698 if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
4699 return -EINVAL;
4700
4701 rocker->msix_entries = kmalloc_array(msix_entries,
4702 sizeof(struct msix_entry),
4703 GFP_KERNEL);
4704 if (!rocker->msix_entries)
4705 return -ENOMEM;
4706
4707 for (i = 0; i < msix_entries; i++)
4708 rocker->msix_entries[i].entry = i;
4709
4710 err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
4711 if (err < 0)
4712 goto err_enable_msix;
4713
4714 return 0;
4715
4716err_enable_msix:
4717 kfree(rocker->msix_entries);
4718 return err;
4719}
4720
4721static void rocker_msix_fini(struct rocker *rocker)
4722{
4723 pci_disable_msix(rocker->pdev);
4724 kfree(rocker->msix_entries);
4725}
4726
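/* PCI probe: enable the device, map the BAR0 registers, set the DMA
 * mask (64-bit with 32-bit fallback), set up MSI-X vectors, run the
 * basic hardware test, reset the switch, initialize the DMA rings,
 * command/event IRQs and the OF-DPA tables, then create a netdev per
 * physical port.
 */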
4727static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4728{
4729 struct rocker *rocker;
4730 int err;
4731
4732 rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
4733 if (!rocker)
4734 return -ENOMEM;
4735
4736 err = pci_enable_device(pdev);
4737 if (err) {
4738 dev_err(&pdev->dev, "pci_enable_device failed\n");
4739 goto err_pci_enable_device;
4740 }
4741
4742 err = pci_request_regions(pdev, rocker_driver_name);
4743 if (err) {
4744 dev_err(&pdev->dev, "pci_request_regions failed\n");
4745 goto err_pci_request_regions;
4746 }
4747
4748 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4749 if (!err) {
4750 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4751 if (err) {
4752 dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
4753 goto err_pci_set_dma_mask;
4754 }
4755 } else {
4756 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4757 if (err) {
4758 dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
4759 goto err_pci_set_dma_mask;
4760 }
4761 }
4762
4763 if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
4764 dev_err(&pdev->dev, "invalid PCI region size\n");
3122a92e 4765 err = -EINVAL;
4766 goto err_pci_resource_len_check;
4767 }
4768
4769 rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
4770 pci_resource_len(pdev, 0));
4771 if (!rocker->hw_addr) {
4772 dev_err(&pdev->dev, "ioremap failed\n");
4773 err = -EIO;
4774 goto err_ioremap;
4775 }
4776 pci_set_master(pdev);
4777
4778 rocker->pdev = pdev;
4779 pci_set_drvdata(pdev, rocker);
4780
4781 rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
4782
4783 err = rocker_msix_init(rocker);
4784 if (err) {
4785 dev_err(&pdev->dev, "MSI-X init failed\n");
4786 goto err_msix_init;
4787 }
4788
4789 err = rocker_basic_hw_test(rocker);
4790 if (err) {
4791 dev_err(&pdev->dev, "basic hw test failed\n");
4792 goto err_basic_hw_test;
4793 }
4794
4795 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
4796
4797 err = rocker_dma_rings_init(rocker);
4798 if (err)
4799 goto err_dma_rings_init;
4800
4801 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
4802 rocker_cmd_irq_handler, 0,
4803 rocker_driver_name, rocker);
4804 if (err) {
4805 dev_err(&pdev->dev, "cannot assign cmd irq\n");
4806 goto err_request_cmd_irq;
4807 }
4808
4809 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
4810 rocker_event_irq_handler, 0,
4811 rocker_driver_name, rocker);
4812 if (err) {
4813 dev_err(&pdev->dev, "cannot assign event irq\n");
4814 goto err_request_event_irq;
4815 }
4816
4817 rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
4818
4819 err = rocker_init_tbls(rocker);
4820 if (err) {
4821 dev_err(&pdev->dev, "cannot init rocker tables\n");
4822 goto err_init_tbls;
4823 }
4824
4825 err = rocker_probe_ports(rocker);
4826 if (err) {
4827 dev_err(&pdev->dev, "failed to probe ports\n");
4828 goto err_probe_ports;
4829 }
4830
4831 dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id);
4832
4833 return 0;
4834
4835err_probe_ports:
4836 rocker_free_tbls(rocker);
4837err_init_tbls:
4838 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
4839err_request_event_irq:
4840 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
4841err_request_cmd_irq:
4842 rocker_dma_rings_fini(rocker);
4843err_dma_rings_init:
4844err_basic_hw_test:
4845 rocker_msix_fini(rocker);
4846err_msix_init:
4847 iounmap(rocker->hw_addr);
4848err_ioremap:
4849err_pci_resource_len_check:
4850err_pci_set_dma_mask:
4851 pci_release_regions(pdev);
4852err_pci_request_regions:
4853 pci_disable_device(pdev);
4854err_pci_enable_device:
4855 kfree(rocker);
4856 return err;
4857}
4858
4859static void rocker_remove(struct pci_dev *pdev)
4860{
4861 struct rocker *rocker = pci_get_drvdata(pdev);
4862
9f6bbf7c 4863 rocker_free_tbls(rocker);
4864 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
4865 rocker_remove_ports(rocker);
4866 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
4867 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
4868 rocker_dma_rings_fini(rocker);
4869 rocker_msix_fini(rocker);
4870 iounmap(rocker->hw_addr);
4871 pci_release_regions(rocker->pdev);
4872 pci_disable_device(rocker->pdev);
4873 kfree(rocker);
4874}
4875
4876static struct pci_driver rocker_pci_driver = {
4877 .name = rocker_driver_name,
4878 .id_table = rocker_pci_id_table,
4879 .probe = rocker_probe,
4880 .remove = rocker_remove,
4881};
4882
4883/************************************
4884 * Net device notifier event handler
4885 ************************************/
4886
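/* On NETDEV_CHANGEUPPER, a rocker port joining a bridge switches its
 * untagged traffic over to the bridge's internal VLAN ID; leaving a
 * bridge restores the port's own internal VLAN ID and re-enables
 * forwarding if the port is up.
 */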
4887static bool rocker_port_dev_check(struct net_device *dev)
4888{
4889 return dev->netdev_ops == &rocker_port_netdev_ops;
4890}
4891
4892static int rocker_port_bridge_join(struct rocker_port *rocker_port,
4893 struct net_device *bridge)
4894{
4895 int err;
4896
4897 rocker_port_internal_vlan_id_put(rocker_port,
4898 rocker_port->dev->ifindex);
4899
4900 rocker_port->bridge_dev = bridge;
4901
4902 /* Use bridge internal VLAN ID for untagged pkts */
4903 err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
4904 if (err)
4905 return err;
4906 rocker_port->internal_vlan_id =
4907 rocker_port_internal_vlan_id_get(rocker_port,
4908 bridge->ifindex);
e47172ab 4909 return rocker_port_vlan(rocker_port, 0, 0);
4910}
4911
4912static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
4913{
4914 int err;
4915
4916 rocker_port_internal_vlan_id_put(rocker_port,
4917 rocker_port->bridge_dev->ifindex);
4918
4919 rocker_port->bridge_dev = NULL;
4920
4921 /* Use port internal VLAN ID for untagged pkts */
4922 err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
4923 if (err)
4924 return err;
4925 rocker_port->internal_vlan_id =
4926 rocker_port_internal_vlan_id_get(rocker_port,
4927 rocker_port->dev->ifindex);
4928 err = rocker_port_vlan(rocker_port, 0, 0);
4929 if (err)
4930 return err;
4931
4932 if (rocker_port->dev->flags & IFF_UP)
4933 err = rocker_port_fwd_enable(rocker_port);
4934
4935 return err;
4936}
4937
4938static int rocker_port_master_changed(struct net_device *dev)
4939{
4940 struct rocker_port *rocker_port = netdev_priv(dev);
4941 struct net_device *master = netdev_master_upper_dev_get(dev);
4942 int err = 0;
4943
4944 /* There are currently three cases handled here:
4945 * 1. Joining a bridge
4946 * 2. Leaving a previously joined bridge
4947 * 3. Other, e.g. being added to or removed from a bond or openvswitch,
4948 * in which case nothing is done
4949 */
4950 if (master && master->rtnl_link_ops &&
4951 !strcmp(master->rtnl_link_ops->kind, "bridge"))
4952 err = rocker_port_bridge_join(rocker_port, master);
a6e95cc7 4953 else if (rocker_port_is_bridged(rocker_port))
4954 err = rocker_port_bridge_leave(rocker_port);
4955
4956 return err;
4957}
4958
4959static int rocker_netdevice_event(struct notifier_block *unused,
4960 unsigned long event, void *ptr)
4961{
4962 struct net_device *dev;
4963 int err;
4964
4965 switch (event) {
4966 case NETDEV_CHANGEUPPER:
4967 dev = netdev_notifier_info_to_dev(ptr);
4968 if (!rocker_port_dev_check(dev))
4969 return NOTIFY_DONE;
4970 err = rocker_port_master_changed(dev);
4971 if (err)
4972 netdev_warn(dev,
4973 "failed to reflect master change (err %d)\n",
4974 err);
4975 break;
4976 }
4977
4978 return NOTIFY_DONE;
4979}
4980
4981static struct notifier_block rocker_netdevice_nb __read_mostly = {
4982 .notifier_call = rocker_netdevice_event,
4983};
4984
4985/************************************
4986 * Net event notifier event handler
4987 ************************************/
4988
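/* ARP neighbour updates (NETEVENT_NEIGH_UPDATE) on rocker ports add or
 * remove the corresponding IPv4 neighbour entry in hardware, depending
 * on whether the neighbour state is valid.
 */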
4989static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
4990{
4991 struct rocker_port *rocker_port = netdev_priv(dev);
4992 int flags = (n->nud_state & NUD_VALID) ? 0 : ROCKER_OP_FLAG_REMOVE;
4993 __be32 ip_addr = *(__be32 *)n->primary_key;
4994
4995 return rocker_port_ipv4_neigh(rocker_port, flags, ip_addr, n->ha);
4996}
4997
4998static int rocker_netevent_event(struct notifier_block *unused,
4999 unsigned long event, void *ptr)
5000{
5001 struct net_device *dev;
5002 struct neighbour *n = ptr;
5003 int err;
5004
5005 switch (event) {
5006 case NETEVENT_NEIGH_UPDATE:
5007 if (n->tbl != &arp_tbl)
5008 return NOTIFY_DONE;
5009 dev = n->dev;
5010 if (!rocker_port_dev_check(dev))
5011 return NOTIFY_DONE;
5012 err = rocker_neigh_update(dev, n);
5013 if (err)
5014 netdev_warn(dev,
5015 "failed to handle neigh update (err %d)\n",
5016 err);
5017 break;
5018 }
5019
5020 return NOTIFY_DONE;
5021}
5022
5023static struct notifier_block rocker_netevent_nb __read_mostly = {
5024 .notifier_call = rocker_netevent_event,
5025};
5026
5027/***********************
5028 * Module init and exit
5029 ***********************/
5030
5031static int __init rocker_module_init(void)
5032{
5033 int err;
5034
5035 register_netdevice_notifier(&rocker_netdevice_nb);
c1beeef7 5036 register_netevent_notifier(&rocker_netevent_nb);
5037 err = pci_register_driver(&rocker_pci_driver);
5038 if (err)
5039 goto err_pci_register_driver;
5040 return 0;
5041
5042err_pci_register_driver:
c1beeef7 5043 unregister_netevent_notifier(&rocker_netevent_nb);
5044 unregister_netdevice_notifier(&rocker_netdevice_nb);
5045 return err;
5046}
5047
5048static void __exit rocker_module_exit(void)
5049{
c1beeef7 5050 unregister_netevent_notifier(&rocker_netevent_nb);
6c707945 5051 unregister_netdevice_notifier(&rocker_netdevice_nb);
5052 pci_unregister_driver(&rocker_pci_driver);
5053}
5054
5055module_init(rocker_module_init);
5056module_exit(rocker_module_exit);
5057
5058MODULE_LICENSE("GPL v2");
5059MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
5060MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
5061MODULE_DESCRIPTION("Rocker switch device driver");
5062MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);