/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_net.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include <net/dst.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-asxx-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>

#define OCTEON_MAX_MTU 65392

static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used.\n");

static int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");

static int receive_group_order;
module_param(receive_group_order, int, 0444);
MODULE_PARM_DESC(receive_group_order, "\n"
	"\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
	"\twill be configured to send incoming packets to multiple POW\n"
	"\tgroups. pow_receive_group parameter is ignored when multiple\n"
	"\tgroups are taken into use and groups are allocated starting\n"
	"\tfrom 0. By default, a single group is used.\n");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tlinux kernel. You must specify pow_send_group along with\n"
	"\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");

/* Mask indicating which receive groups are in use. */
int pow_receive_groups;

/*
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/*
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

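/* Interval (in core clock cycles) between TX cleanup polls; computed in
 * cvm_oct_probe().
 */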
u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
	/*
	 * FPA 0 may have been drained, try to refill it if we need
	 * more than num_packet_buffers / 2, otherwise normal receive
	 * processing will refill it. If it were drained, no packets
	 * could be received so cvm_oct_napi_poll would never be
	 * invoked to do the refill.
	 */
	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
}

static void cvm_oct_periodic_worker(struct work_struct *work)
{
	struct octeon_ethernet *priv = container_of(work,
						    struct octeon_ethernet,
						    port_periodic_work.work);

	if (priv->poll)
		priv->poll(cvm_oct_device[priv->port]);

	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
						cvm_oct_device[priv->port]);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		schedule_delayed_work(&priv->port_periodic_work, HZ);
}

static void cvm_oct_configure_common_hw(void)
{
	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);

#ifdef __LITTLE_ENDIAN
	{
		union cvmx_ipd_ctl_status ipd_ctl_status;

		ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
		ipd_ctl_status.s.pkt_lend = 1;
		ipd_ctl_status.s.wqe_lend = 1;
		cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
	}
#endif

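	/*
	 * Enable Random Early Discard. The two arguments are
	 * free-buffer thresholds: above the first, incoming packets
	 * always pass; below the second, they are always dropped
	 * (per the SDK helper's semantics).
	 */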
	cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}

/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

	while (segments--) {
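		/*
		 * The hardware keeps a pointer to the next buffer in
		 * the 8 bytes just before each segment's data, hence
		 * the "addr - 8" below.
		 */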
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
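		/* The I ("don't free") bit means the buffer is not
		 * returned to the FPA.
		 */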
		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      CVMX_FPA_PACKET_POOL_SIZE / 128);
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);

/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev: Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
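			/*
			 * The second argument is the SDK's clear-on-read
			 * flag, so each call returns deltas that are
			 * accumulated below.
			 */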
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
		priv->stats.rx_dropped += rx_status.dropped_packets;
	}

	return &priv->stats;
}

/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev: Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	int vlan_bytes = VLAN_HLEN;
#else
	int vlan_bytes = 0;
#endif
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;

	dev->mtu = new_mtu;

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int index = INDEX(priv->port);
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + mtu_overhead;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
		    OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to flag packets larger
			 * than the MTU or smaller than 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;

			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}

/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev: Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;
		int index = INDEX(priv->port);

		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

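		/*
		 * Bit 0 of GMXX_PRTX_CFG is the port enable bit; clear
		 * it while rewriting the address filter, then restore
		 * the saved configuration.
		 */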
		gmx_cfg.u64 =
			cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}

static int cvm_oct_set_mac_filter(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		u8 *ptr = dev->dev_addr;
		u64 mac = 0;
		int index = INDEX(priv->port);

		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (u64)ptr[i];

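		/*
		 * Disable the port while updating. SMAC appears to be
		 * the station MAC used by the hardware itself (e.g. as
		 * the pause frame source address); ADR_CAM0..5 each
		 * hold one byte of the RX filter address.
		 */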
		gmx_cfg.u64 =
			cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[0]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[1]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[5]);
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}

/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev: The device in question.
 * @addr: Socket address.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int r = eth_mac_addr(dev, addr);

	if (r)
		return r;
	return cvm_oct_set_mac_filter(dev);
}

/**
 * cvm_oct_common_init - per network device initialization
 * @dev: Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	const u8 *mac = NULL;

	if (priv->of_node)
		mac = of_get_mac_address(priv->of_node);

	if (mac)
		ether_addr_copy(dev->dev_addr, mac);
	else
		eth_hw_addr_random(dev);

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1) &&
	    (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	if (priv->queue != -1)
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	dev->ethtool_ops = &cvm_oct_ethtool_ops;

	cvm_oct_set_mac_filter(dev);
	dev_set_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

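	/* Leave the port in a known (down) state until it is opened. */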
	if (dev->netdev_ops->ndo_stop)
		dev->netdev_ops->ndo_stop(dev);

	return 0;
}

void cvm_oct_common_uninit(struct net_device *dev)
{
	if (dev->phydev)
		phy_disconnect(dev->phydev);
}

int cvm_oct_common_open(struct net_device *dev,
			void (*link_poll)(struct net_device *))
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	cvmx_helper_link_info_t link_info;
	int rv;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmx_cfg.s.en = 1;
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		gmx_cfg.s.pknd = priv->port;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	if (octeon_is_simulation())
		return 0;

	if (dev->phydev) {
		int r = phy_read_status(dev->phydev);

		if (r == 0 && dev->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		link_info = cvmx_helper_link_get(priv->port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = link_poll;
		link_poll(dev);
	}

	return 0;
}

void cvm_oct_link_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_helper_link_info_t link_info;

	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info)
		return;

	if (cvmx_helper_link_set(priv->port, link_info))
		link_info.u64 = priv->link_info;
	else
		priv->link_info = link_info.u64;

	if (link_info.s.link_up) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev)) {
		netif_carrier_off(dev);
	}
	cvm_oct_note_carrier(priv, link_info);
}

static int cvm_oct_xaui_open(struct net_device *dev)
{
	return cvm_oct_common_open(dev, cvm_oct_link_poll);
}

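/*
 * One ops table per interface mode; they differ only in the
 * init/uninit/open/stop hooks and in pow0's transmit path, which
 * sends through the POW instead of the PKO hardware queues.
 */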
static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_open = cvm_oct_xaui_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init = cvm_oct_sgmii_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_open = cvm_oct_sgmii_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init = cvm_oct_spi_init,
	.ndo_uninit = cvm_oct_spi_uninit,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_open = cvm_oct_rgmii_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_start_xmit = cvm_oct_xmit_pow,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static struct device_node *cvm_oct_of_get_child(
	const struct device_node *parent, int reg_val)
{
	struct device_node *node = NULL;
	int size;
	const __be32 *addr;

	for (;;) {
		node = of_get_next_child(parent, node);
		if (!node)
			break;
		addr = of_get_property(node, "reg", &size);
		if (addr && (be32_to_cpu(*addr) == reg_val))
			break;
	}
	return node;
}

static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
						 int interface, int port)
{
	struct device_node *ni, *np;

	ni = cvm_oct_of_get_child(pip, interface);
	if (!ni)
		return NULL;

	np = cvm_oct_of_get_child(ni, port);
	of_node_put(ni);

	return np;
}

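/*
 * Apply optional devicetree "rx-delay"/"tx-delay" properties to the
 * ASX clock-delay registers to tune RGMII timing skew.
 */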
static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port)
{
	u32 delay_value;

	if (!of_property_read_u32(np, "rx-delay", &delay_value))
		cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
	if (!of_property_read_u32(np, "tx-delay", &delay_value))
		cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
}

static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	mtu_overhead += VLAN_HLEN;
#endif

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	if (receive_group_order) {
		if (receive_group_order > 4)
			receive_group_order = 4;
		pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
	} else {
		pow_receive_groups = BIT(pow_receive_group);
	}
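	/*
	 * With receive_group_order = 2, for example, groups 0-3 are
	 * taken into use (pow_receive_groups == 0xf).
	 */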

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
				cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));

			if (receive_group_order) {
				int tag_mask;

				/* We support only 16 groups at the moment, so
				 * always disable the two additional "hidden"
				 * tag_mask bits on CN68XX.
				 */
				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
					pip_prt_tagx.u64 |= 0x3ull << 44;

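				/*
				 * Spread flows across the in-use groups:
				 * the low receive_group_order bits of
				 * the computed flow tag pick the group,
				 * starting from grptagbase 0.
				 */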
				tag_mask = ~((1 << receive_group_order) - 1);
				pip_prt_tagx.s.grptagbase = 0;
				pip_prt_tagx.s.grptagmask = tag_mask;
				pip_prt_tagx.s.grptag = 1;
				pip_prt_tagx.s.tag_mode = 0;
				pip_prt_tagx.s.inc_prt_flag = 1;
				pip_prt_tagx.s.ip6_dprt_flag = 1;
				pip_prt_tagx.s.ip4_dprt_flag = 1;
				pip_prt_tagx.s.ip6_sprt_flag = 1;
				pip_prt_tagx.s.ip4_sprt_flag = 1;
				pip_prt_tagx.s.ip6_dst_flag = 1;
				pip_prt_tagx.s.ip4_dst_flag = 1;
				pip_prt_tagx.s.ip6_src_flag = 1;
				pip_prt_tagx.s.ip4_src_flag = 1;
				pip_prt_tagx.s.grp = 0;
			} else {
				pip_prt_tagx.s.grptag = 0;
				pip_prt_tagx.s.grp = pow_receive_group;
			}

			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if (pow_send_group != -1) {
		struct net_device *dev;

		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			SET_NETDEV_DEV(dev, &pdev->dev);
			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
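			/*
			 * MTU limits are expressed net of L2 overhead;
			 * the hardware checks whole frames against
			 * VLAN_ETH_ZLEN and OCTEON_MAX_MTU.
			 */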
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
			cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
				alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			SET_NETDEV_DEV(dev, &pdev->dev);
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
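			/*
			 * Reserve one 32-bit FAU counter per PKO queue,
			 * allocated downward from the
			 * FAU_NUM_PACKET_BUFFERS_TO_FREE base.
			 */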
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				cvm_set_rgmii_delay(priv->of_node, interface,
						    port_index);
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
					cvmx_pko_get_num_queues(priv->port) *
					sizeof(u32);
				schedule_delayed_work(&priv->port_periodic_work, HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}

static int cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	cvmx_ipd_disable();

	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
	return 0;
}

static const struct of_device_id cvm_oct_match[] = {
	{
		.compatible = "cavium,octeon-3860-pip",
	},
	{},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
	.probe = cvm_oct_probe,
	.remove = cvm_oct_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = cvm_oct_match,
	},
};

module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");