/**********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 **********************************************************************/
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_net.h>

#include <net/dst.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-helper.h>

#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>

static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
        "\tNumber of packet buffers to allocate and store in the\n"
        "\tFPA. By default, 1024 packet buffers are used.\n");

int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
        "\tPOW group to receive packets from. All ethernet hardware\n"
        "\twill be configured to send incoming packets to this POW\n"
        "\tgroup. Also any other software can submit packets to this\n"
        "\tgroup for the kernel to process.");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
        "\tPOW group to send packets to other software on. This\n"
        "\tcontrols the creation of the virtual device pow0.\n"
        "\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
        "\tWhen set, always send to the pow group. This will cause\n"
        "\tpackets sent to real ethernet devices to be sent to the\n"
        "\tPOW group instead of the hardware. Unless some other\n"
        "\tapplication changes the config, packets will still be\n"
        "\treceived from the low level hardware. Use this option\n"
        "\tto allow a CVMX app to intercept all packets from the\n"
        "\tlinux kernel. You must specify pow_send_group along with\n"
        "\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
        "\tComma separated list of ethernet devices that should use the\n"
        "\tPOW for transmit instead of the actual ethernet hardware. This\n"
        "\tis a per port version of always_use_pow. always_use_pow takes\n"
        "\tprecedence over this list. For example, setting this to\n"
        "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
        "\tusing the pow_send_group.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");

/**
 * cvm_oct_poll_queue - Workqueue for polling operations.
 */
struct workqueue_struct *cvm_oct_poll_queue;

/**
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/**
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

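/*
 * Interval between TX cleanup polls, expressed in core clock cycles
 * (computed in cvm_oct_probe from octeon_get_clock_rate()).
 */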
u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
        /*
         * FPA 0 may have been drained, try to refill it if we need
         * more than num_packet_buffers / 2, otherwise normal receive
         * processing will refill it. If it were drained, no packets
         * could be received so cvm_oct_napi_poll would never be
         * invoked to do the refill.
         */
        cvm_oct_rx_refill_pool(num_packet_buffers / 2);

        if (!atomic_read(&cvm_oct_poll_queue_stopping))
                queue_delayed_work(cvm_oct_poll_queue,
                                   &cvm_oct_rx_refill_work, HZ);
}

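/**
 * cvm_oct_periodic_worker - per-port periodic maintenance work
 * @work: Work queue entry embedded in the port's private data
 *
 * Runs any interface-specific poll callback, refreshes the device
 * statistics and reschedules itself unless the driver is shutting down.
 */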
static void cvm_oct_periodic_worker(struct work_struct *work)
{
        struct octeon_ethernet *priv = container_of(work,
                                                    struct octeon_ethernet,
                                                    port_periodic_work.work);

        if (priv->poll)
                priv->poll(cvm_oct_device[priv->port]);

        cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
                                                cvm_oct_device[priv->port]);

        if (!atomic_read(&cvm_oct_poll_queue_stopping))
                queue_delayed_work(cvm_oct_poll_queue,
                                   &priv->port_periodic_work, HZ);
}

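/*
 * cvm_oct_configure_common_hw - set up hardware shared by all ports:
 * fill the FPA packet/WQE/output pools, select little-endian IPD
 * operation when needed and optionally enable RED.
 */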
static void cvm_oct_configure_common_hw(void)
{
        /* Setup the FPA */
        cvmx_fpa_enable();
        cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
                             num_packet_buffers);
        cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
                             num_packet_buffers);
        if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
                cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
                                     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);

#ifdef __LITTLE_ENDIAN
        {
                union cvmx_ipd_ctl_status ipd_ctl_status;

                ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
                ipd_ctl_status.s.pkt_lend = 1;
                ipd_ctl_status.s.wqe_lend = 1;
                cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
        }
#endif

        if (USE_RED)
                cvmx_helper_setup_red(num_packet_buffers / 4,
                                      num_packet_buffers / 8);
}

/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
        cvmx_wqe_t *work = work_queue_entry;

        int segments = work->word2.s.bufs;
        union cvmx_buf_ptr segment_ptr = work->packet_ptr;

        while (segments--) {
                union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
                        cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

                if (unlikely(!segment_ptr.s.i))
                        cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
                                      segment_ptr.s.pool,
                                      DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE /
                                                     128));
                segment_ptr = next_ptr;
        }
        cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));

        return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);

/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev: Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
        cvmx_pip_port_status_t rx_status;
        cvmx_pko_port_status_t tx_status;
        struct octeon_ethernet *priv = netdev_priv(dev);

        if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
                if (octeon_is_simulation()) {
                        /* The simulator doesn't support statistics */
                        memset(&rx_status, 0, sizeof(rx_status));
                        memset(&tx_status, 0, sizeof(tx_status));
                } else {
                        cvmx_pip_get_port_status(priv->port, 1, &rx_status);
                        cvmx_pko_get_port_status(priv->port, 1, &tx_status);
                }

                priv->stats.rx_packets += rx_status.inb_packets;
                priv->stats.tx_packets += tx_status.packets;
                priv->stats.rx_bytes += rx_status.inb_octets;
                priv->stats.tx_bytes += tx_status.octets;
                priv->stats.multicast += rx_status.multicast_packets;
                priv->stats.rx_crc_errors += rx_status.inb_errors;
                priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;

                /*
                 * The drop counter must be incremented atomically
                 * since the RX tasklet also increments it.
                 */
#ifdef CONFIG_64BIT
                atomic64_add(rx_status.dropped_packets,
                             (atomic64_t *)&priv->stats.rx_dropped);
#else
                atomic_add(rx_status.dropped_packets,
                           (atomic_t *)&priv->stats.rx_dropped);
#endif
        }

        return &priv->stats;
}

/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev: Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        int interface = INTERFACE(priv->port);
        int index = INDEX(priv->port);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
        int vlan_bytes = 4;
#else
        int vlan_bytes = 0;
#endif

        /*
         * Limit the MTU so that the resulting ethernet frames are
         * between 64 bytes and 65392 bytes.
         */
        if ((new_mtu + 14 + 4 + vlan_bytes < 64)
            || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
                pr_err("MTU must be between %d and %d.\n",
                       64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
                return -EINVAL;
        }
        dev->mtu = new_mtu;

        if ((interface < 2)
            && (cvmx_helper_interface_get_mode(interface) !=
                CVMX_HELPER_INTERFACE_MODE_SPI)) {
                /* Add ethernet header and FCS, and VLAN if configured. */
                int max_packet = new_mtu + 14 + 4 + vlan_bytes;

                if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
                    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
                        /* Signal errors on packets larger than the MTU */
                        cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
                                       max_packet);
                } else {
                        /*
                         * Set the hardware to truncate packets larger
                         * than the MTU and smaller than 64 bytes.
                         */
                        union cvmx_pip_frm_len_chkx frm_len_chk;

                        frm_len_chk.u64 = 0;
                        frm_len_chk.s.minlen = 64;
                        frm_len_chk.s.maxlen = max_packet;
                        cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
                                       frm_len_chk.u64);
                }
                /*
                 * Set the hardware to truncate packets larger than
                 * the MTU. The jabber register must be set to a
                 * multiple of 8 bytes, so round up.
                 */
                cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
                               (max_packet + 7) & ~7u);
        }
        return 0;
}

/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev: Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        struct octeon_ethernet *priv = netdev_priv(dev);
        int interface = INTERFACE(priv->port);
        int index = INDEX(priv->port);

        if ((interface < 2)
            && (cvmx_helper_interface_get_mode(interface) !=
                CVMX_HELPER_INTERFACE_MODE_SPI)) {
                union cvmx_gmxx_rxx_adr_ctl control;

                control.u64 = 0;
                control.s.bcst = 1;     /* Allow broadcast MAC addresses */

                if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
                    (dev->flags & IFF_PROMISC))
                        /* Force accept multicast packets */
                        control.s.mcst = 2;
                else
                        /* Force reject multicast packets */
                        control.s.mcst = 1;

                if (dev->flags & IFF_PROMISC)
                        /*
                         * Reject matches if promisc. Since CAM is
                         * shut off, should accept everything.
                         */
                        control.s.cam_mode = 0;
                else
                        /* Filter packets based on the CAM */
                        control.s.cam_mode = 1;

                gmx_cfg.u64 =
                    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64 & ~1ull);

                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
                               control.u64);
                if (dev->flags & IFF_PROMISC)
                        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
                                       (index, interface), 0);
                else
                        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
                                       (index, interface), 1);

                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64);
        }
}

/**
 * cvm_oct_set_mac_filter - program the hardware unicast MAC filter
 * @dev:    The device in question.
 *
 * Writes dev->dev_addr into the GMX SMAC and address CAM registers and
 * refreshes the multicast filter settings.
 *
 * Returns Zero on success
 */
static int cvm_oct_set_mac_filter(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        int interface = INTERFACE(priv->port);
        int index = INDEX(priv->port);

        if ((interface < 2)
            && (cvmx_helper_interface_get_mode(interface) !=
                CVMX_HELPER_INTERFACE_MODE_SPI)) {
                int i;
                uint8_t *ptr = dev->dev_addr;
                uint64_t mac = 0;

                for (i = 0; i < 6; i++)
                        mac = (mac << 8) | (uint64_t)ptr[i];

                gmx_cfg.u64 =
                    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64 & ~1ull);

                cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
                               ptr[0]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
                               ptr[1]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
                               ptr[2]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
                               ptr[3]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
                               ptr[4]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
                               ptr[5]);
                cvm_oct_common_set_multicast_list(dev);
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64);
        }
        return 0;
}

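/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev:    The device in question.
 * @addr:   Address structure to change it to.
 *
 * Returns Zero on success
 */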
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
        int r = eth_mac_addr(dev, addr);

        if (r)
                return r;
        return cvm_oct_set_mac_filter(dev);
}

/**
 * cvm_oct_common_init - per network device initialization
 * @dev: Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        const u8 *mac = NULL;

        if (priv->of_node)
                mac = of_get_mac_address(priv->of_node);

        if (mac)
                ether_addr_copy(dev->dev_addr, mac);
        else
                eth_hw_addr_random(dev);

        /*
         * Force the interface to use the POW send if always_use_pow
         * was specified or it is in the pow send list.
         */
        if ((pow_send_group != -1)
            && (always_use_pow || strstr(pow_send_list, dev->name)))
                priv->queue = -1;

        if (priv->queue != -1) {
                dev->features |= NETIF_F_SG;
                if (USE_HW_TCPUDP_CHECKSUM)
                        dev->features |= NETIF_F_IP_CSUM;
        }

        /* We do our own locking, Linux doesn't need to */
        dev->features |= NETIF_F_LLTX;
        dev->ethtool_ops = &cvm_oct_ethtool_ops;

        cvm_oct_set_mac_filter(dev);
        dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

        /*
         * Zero out stats for port so we won't mistakenly show
         * counters from the bootloader.
         */
        memset(dev->netdev_ops->ndo_get_stats(dev), 0,
               sizeof(struct net_device_stats));

        return 0;
}

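/**
 * cvm_oct_common_uninit - common per network device teardown
 * @dev: Device being uninitialized
 *
 * Disconnects the attached PHY, if any.
 */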
void cvm_oct_common_uninit(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);

        if (priv->phydev)
                phy_disconnect(priv->phydev);
}

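/*
 * net_device_ops tables, one per supported interface mode. Each table
 * combines the mode-specific init/uninit/open/stop handlers with the
 * shared MTU, MAC address, multicast and statistics handlers.
 */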
static const struct net_device_ops cvm_oct_npi_netdev_ops = {
        .ndo_init = cvm_oct_common_init,
        .ndo_uninit = cvm_oct_common_uninit,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
        .ndo_init = cvm_oct_xaui_init,
        .ndo_uninit = cvm_oct_xaui_uninit,
        .ndo_open = cvm_oct_xaui_open,
        .ndo_stop = cvm_oct_xaui_stop,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
        .ndo_init = cvm_oct_sgmii_init,
        .ndo_uninit = cvm_oct_sgmii_uninit,
        .ndo_open = cvm_oct_sgmii_open,
        .ndo_stop = cvm_oct_sgmii_stop,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
        .ndo_init = cvm_oct_spi_init,
        .ndo_uninit = cvm_oct_spi_uninit,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
        .ndo_init = cvm_oct_rgmii_init,
        .ndo_uninit = cvm_oct_rgmii_uninit,
        .ndo_open = cvm_oct_rgmii_open,
        .ndo_stop = cvm_oct_rgmii_stop,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
        .ndo_init = cvm_oct_common_init,
        .ndo_start_xmit = cvm_oct_xmit_pow,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

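/*
 * cvm_oct_of_get_child - find the child of @parent whose "reg" property
 * matches @reg_val. Returns the matching node with an elevated reference
 * count, or NULL if no child matches.
 */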
static struct device_node *cvm_oct_of_get_child(
        const struct device_node *parent, int reg_val)
{
        struct device_node *node = NULL;
        int size;
        const __be32 *addr;

        for (;;) {
                node = of_get_next_child(parent, node);
                if (!node)
                        break;
                addr = of_get_property(node, "reg", &size);
                if (addr && (be32_to_cpu(*addr) == reg_val))
                        break;
        }
        return node;
}

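/*
 * cvm_oct_node_for_port - look up the device tree node for a given
 * interface/port pair under the PIP node, or NULL if it is not described.
 */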
static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
                                                 int interface, int port)
{
        struct device_node *ni, *np;

        ni = cvm_oct_of_get_child(pip, interface);
        if (!ni)
                return NULL;

        np = cvm_oct_of_get_child(ni, port);
        of_node_put(ni);

        return np;
}

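/*
 * cvm_oct_probe - driver probe routine. Brings up the shared packet I/O
 * hardware, points every input port at pow_receive_group, then allocates
 * and registers a net_device for each port (plus the optional POW-only
 * pow%d device) and starts the periodic and RX refill workers.
 */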
static int cvm_oct_probe(struct platform_device *pdev)
{
        int num_interfaces;
        int interface;
        int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
        int qos;
        struct device_node *pip;

        octeon_mdiobus_force_mod_depencency();
        pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);

        pip = pdev->dev.of_node;
        if (!pip) {
                pr_err("Error: No 'pip' in /aliases\n");
                return -EINVAL;
        }

        cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
        if (!cvm_oct_poll_queue) {
                pr_err("octeon-ethernet: Cannot create workqueue\n");
                return -ENOMEM;
        }

        cvm_oct_configure_common_hw();

        cvmx_helper_initialize_packet_io_global();

        /* Change the input group for all ports before input is enabled */
        num_interfaces = cvmx_helper_get_number_of_interfaces();
        for (interface = 0; interface < num_interfaces; interface++) {
                int num_ports = cvmx_helper_ports_on_interface(interface);
                int port;

                for (port = cvmx_helper_get_ipd_port(interface, 0);
                     port < cvmx_helper_get_ipd_port(interface, num_ports);
                     port++) {
                        union cvmx_pip_prt_tagx pip_prt_tagx;

                        pip_prt_tagx.u64 =
                            cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
                        pip_prt_tagx.s.grp = pow_receive_group;
                        cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
                                       pip_prt_tagx.u64);
                }
        }

        cvmx_helper_ipd_and_packet_input_enable();

        memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

        /*
         * Initialize the FAU used for counting packet buffers that
         * need to be freed.
         */
        cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

        /* Initialize the FAU used for counting tx SKBs that need to be freed */
        cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

        if (pow_send_group != -1) {
                struct net_device *dev;

                pr_info("\tConfiguring device for POW only access\n");
                dev = alloc_etherdev(sizeof(struct octeon_ethernet));
                if (dev) {
                        /* Initialize the device private structure. */
                        struct octeon_ethernet *priv = netdev_priv(dev);

                        dev->netdev_ops = &cvm_oct_pow_netdev_ops;
                        priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
                        priv->port = CVMX_PIP_NUM_INPUT_PORTS;
                        priv->queue = -1;
                        strcpy(dev->name, "pow%d");
                        for (qos = 0; qos < 16; qos++)
                                skb_queue_head_init(&priv->tx_free_list[qos]);

                        if (register_netdev(dev) < 0) {
                                pr_err("Failed to register ethernet device for POW\n");
                                free_netdev(dev);
                        } else {
                                cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
                                pr_info("%s: POW send group %d, receive group %d\n",
                                        dev->name, pow_send_group,
                                        pow_receive_group);
                        }
                } else {
                        pr_err("Failed to allocate ethernet device for POW\n");
                }
        }

        num_interfaces = cvmx_helper_get_number_of_interfaces();
        for (interface = 0; interface < num_interfaces; interface++) {
                cvmx_helper_interface_mode_t imode =
                    cvmx_helper_interface_get_mode(interface);
                int num_ports = cvmx_helper_ports_on_interface(interface);
                int port;
                int port_index;

                for (port_index = 0,
                     port = cvmx_helper_get_ipd_port(interface, 0);
                     port < cvmx_helper_get_ipd_port(interface, num_ports);
                     port_index++, port++) {
                        struct octeon_ethernet *priv;
                        struct net_device *dev =
                            alloc_etherdev(sizeof(struct octeon_ethernet));

                        if (!dev) {
                                pr_err("Failed to allocate ethernet device for port %d\n",
                                       port);
                                continue;
                        }

                        /* Initialize the device private structure. */
                        priv = netdev_priv(dev);
                        priv->netdev = dev;
                        priv->of_node = cvm_oct_node_for_port(pip, interface,
                                                              port_index);

                        INIT_DELAYED_WORK(&priv->port_periodic_work,
                                          cvm_oct_periodic_worker);
                        priv->imode = imode;
                        priv->port = port;
                        priv->queue = cvmx_pko_get_base_queue(priv->port);
                        priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
                        for (qos = 0; qos < 16; qos++)
                                skb_queue_head_init(&priv->tx_free_list[qos]);
                        for (qos = 0; qos < cvmx_pko_get_num_queues(port);
                             qos++)
                                cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

                        switch (priv->imode) {
                        /* These types don't support ports to IPD/PKO */
                        case CVMX_HELPER_INTERFACE_MODE_DISABLED:
                        case CVMX_HELPER_INTERFACE_MODE_PCIE:
                        case CVMX_HELPER_INTERFACE_MODE_PICMG:
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_NPI:
                                dev->netdev_ops = &cvm_oct_npi_netdev_ops;
                                strcpy(dev->name, "npi%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_XAUI:
                                dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
                                strcpy(dev->name, "xaui%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_LOOP:
                                dev->netdev_ops = &cvm_oct_npi_netdev_ops;
                                strcpy(dev->name, "loop%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_SGMII:
                                dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
                                strcpy(dev->name, "eth%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_SPI:
                                dev->netdev_ops = &cvm_oct_spi_netdev_ops;
                                strcpy(dev->name, "spi%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_RGMII:
                        case CVMX_HELPER_INTERFACE_MODE_GMII:
                                dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
                                strcpy(dev->name, "eth%d");
                                break;
                        }

                        if (!dev->netdev_ops) {
                                free_netdev(dev);
                        } else if (register_netdev(dev) < 0) {
                                pr_err("Failed to register ethernet device for interface %d, port %d\n",
                                       interface, priv->port);
                                free_netdev(dev);
                        } else {
                                cvm_oct_device[priv->port] = dev;
                                fau -=
                                    cvmx_pko_get_num_queues(priv->port) *
                                    sizeof(uint32_t);
                                queue_delayed_work(cvm_oct_poll_queue,
                                                   &priv->port_periodic_work,
                                                   HZ);
                        }
                }
        }

        cvm_oct_tx_initialize();
        cvm_oct_rx_initialize();

        /*
         * 150 uS: about 10 1500-byte packets at 1GE.
         */
        cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

        queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

        return 0;
}

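/*
 * cvm_oct_remove - driver remove routine. Stops packet input, tears down
 * the RX/TX machinery and workers, unregisters and frees every net_device,
 * then shuts down PKO/IPD and returns the packet buffers to the FPA pools.
 */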
static int cvm_oct_remove(struct platform_device *pdev)
{
        int port;

        /* Disable POW interrupt */
        cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

        cvmx_ipd_disable();

        /* Free the interrupt handler */
        free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);

        atomic_inc_return(&cvm_oct_poll_queue_stopping);
        cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

        cvm_oct_rx_shutdown();
        cvm_oct_tx_shutdown();

        cvmx_pko_disable();

        /* Free the ethernet devices */
        for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
                if (cvm_oct_device[port]) {
                        struct net_device *dev = cvm_oct_device[port];
                        struct octeon_ethernet *priv = netdev_priv(dev);

                        cancel_delayed_work_sync(&priv->port_periodic_work);

                        cvm_oct_tx_shutdown_dev(dev);
                        unregister_netdev(dev);
                        free_netdev(dev);
                        cvm_oct_device[port] = NULL;
                }
        }

        destroy_workqueue(cvm_oct_poll_queue);

        cvmx_pko_shutdown();

        cvmx_ipd_free_ptr();

        /* Free the HW pools */
        cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
                              num_packet_buffers);
        cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
                              num_packet_buffers);
        if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
                cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
                                      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
        return 0;
}

static const struct of_device_id cvm_oct_match[] = {
        {
                .compatible = "cavium,octeon-3860-pip",
        },
        {},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
        .probe = cvm_oct_probe,
        .remove = cvm_oct_remove,
        .driver = {
                .name = KBUILD_MODNAME,
                .of_match_table = cvm_oct_match,
        },
};

module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");