/* drivers/net/ethernet/freescale/gianfar_ethtool.c
 *
 * Gianfar Ethernet Driver
 * Ethtool support for Gianfar Enet
 * Based on e1000 ethtool support
 *
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
 *
 * This software may be used and distributed according to
 * the terms of the GNU Public License, Version 2, incorporated herein
 * by reference.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <asm/types.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/sort.h>
#include <linux/if_vlan.h>

#include "gianfar.h"

extern void gfar_start(struct net_device *dev);
extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
			      int rx_work_limit);

#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff

static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
			    u64 *buf);
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 *buf);
static int gfar_gcoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals);
static int gfar_scoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals);
static void gfar_gringparam(struct net_device *dev,
			    struct ethtool_ringparam *rvals);
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals);
static void gfar_gdrvinfo(struct net_device *dev,
			  struct ethtool_drvinfo *drvinfo);

static const char stat_gstrings[][ETH_GSTRING_LEN] = {
	"rx-dropped-by-kernel",
	"rx-large-frame-errors",
	"rx-short-frame-errors",
	"rx-non-octet-errors",
	"rx-truncated-frames",
	"rx-skb-missing-errors",
	"tx-rx-65-127-frames",
	"tx-rx-128-255-frames",
	"tx-rx-256-511-frames",
	"tx-rx-512-1023-frames",
	"tx-rx-1024-1518-frames",
	"tx-rx-1519-1522-good-vlan",
	"receive-multicast-packet",
	"receive-broadcast-packet",
	"rx-control-frame-packets",
	"rx-pause-frame-packets",
	"rx-frame-length-error",
	"rx-carrier-sense-error",
	"rx-undersize-packets",
	"rx-oversize-packets",
	"rx-fragmented-frames",
	"tx-multicast-packets",
	"tx-broadcast-packets",
	"tx-pause-control-frames",
	"tx-deferral-packets",
	"tx-excessive-deferral-packets",
	"tx-single-collision-packets",
	"tx-multiple-collision-packets",
	"tx-late-collision-packets",
	"tx-excessive-collision-packets",
	"tx-total-collision",
	"tx-oversize-frames",
	"tx-undersize-frames",
	"tx-fragmented-frames",
};

/* Fill in a buffer with the strings which correspond to the
 * stats */
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
		memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
	else
		memcpy(buf, stat_gstrings,
		       GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
}

/* Fill in an array of 64-bit statistics from various sources.
 * This array will be appended to the end of the ethtool_stats
 * structure, and returned to user space
 */
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
			    u64 *buf)
{
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	atomic64_t *extra = (atomic64_t *)&priv->extra_stats;

	for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
		buf[i] = atomic64_read(&extra[i]);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;

		for (; i < GFAR_STATS_LEN; i++, rmon++)
			buf[i] = (u64) gfar_read(rmon);
	}
}

static int gfar_sset_count(struct net_device *dev, int sset)
{
	struct gfar_private *priv = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
			return GFAR_STATS_LEN;
		else
			return GFAR_EXTRA_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* Fills in the drvinfo structure with some basic info */
static void gfar_gdrvinfo(struct net_device *dev,
			  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, gfar_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}

static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;

	if (NULL == phydev)
		return -ENODEV;

	return phy_ethtool_sset(phydev, cmd);
}

/* Return the current settings in the ethtool_cmd structure */
static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar_priv_tx_q *tx_queue = NULL;

	if (NULL == phydev)
		return -ENODEV;
	tx_queue = priv->tx_queue[0];
	rx_queue = priv->rx_queue[0];

	/* etsec-1.7 and older versions have only one txic
	 * and rxic regs although they support multiple queues */
	cmd->maxtxpkt = get_icft_value(tx_queue->txic);
	cmd->maxrxpkt = get_icft_value(rx_queue->rxic);

	return phy_ethtool_gset(phydev, cmd);
}

/* Return the length of the register structure */
static int gfar_reglen(struct net_device *dev)
{
	return sizeof (struct gfar);
}

/* Return a dump of the GFAR register space */
static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *regbuf)
{
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
	u32 *buf = (u32 *) regbuf;

	for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
		buf[i] = gfar_read(&theregs[i]);
}

/* Convert microseconds to ethernet clock ticks, which changes
 * depending on what speed the controller is running at */
static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
				     unsigned int usecs)
{
	unsigned int count;

	/* The timer is different, depending on the interface speed */
	switch (priv->phydev->speed) {
	case SPEED_1000:
		count = GFAR_GBIT_TIME;
		break;
	case SPEED_100:
		count = GFAR_100_TIME;
		break;
	case SPEED_10:
	default:
		count = GFAR_10_TIME;
		break;
	}

	/* Make sure we return a number greater than 0
	 * if usecs > 0 */
	return (usecs * 1000 + count - 1) / count;
}

/* Convert ethernet clock ticks to microseconds */
static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
				     unsigned int ticks)
{
	unsigned int count;

	/* The timer is different, depending on the interface speed */
	switch (priv->phydev->speed) {
	case SPEED_1000:
		count = GFAR_GBIT_TIME;
		break;
	case SPEED_100:
		count = GFAR_100_TIME;
		break;
	case SPEED_10:
	default:
		count = GFAR_10_TIME;
		break;
	}

	/* Make sure we return a number greater than 0
	 * if ticks is > 0 */
	return (ticks * count) / 1000;
}

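/* Worked example (illustrative only; assumes the conventional tick
 * periods GFAR_GBIT_TIME = 512, GFAR_100_TIME = 2560 and
 * GFAR_10_TIME = 25600 from gianfar.h, i.e. nanoseconds per timer
 * tick at each link speed):
 *
 *   at 1000 Mbps: gfar_usecs2ticks(priv, 100)
 *                   = (100 * 1000 + 511) / 512 = 196 ticks
 *   and back:     gfar_ticks2usecs(priv, 196)
 *                   = (196 * 512) / 1000 = 100 usecs
 *
 * The "+ count - 1" in gfar_usecs2ticks rounds up, so a non-zero
 * request never collapses to zero ticks; the reverse conversion
 * truncates.
 */
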
/* Get the coalescing parameters, and put them in the cvals
 * structure.  */
static int gfar_gcoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar_priv_tx_q *tx_queue = NULL;
	unsigned long rxtime;
	unsigned long rxcount;
	unsigned long txtime;
	unsigned long txcount;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	if (NULL == priv->phydev)
		return -ENODEV;

	rx_queue = priv->rx_queue[0];
	tx_queue = priv->tx_queue[0];

	rxtime  = get_ictt_value(rx_queue->rxic);
	rxcount = get_icft_value(rx_queue->rxic);
	txtime  = get_ictt_value(tx_queue->txic);
	txcount = get_icft_value(tx_queue->txic);
	cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
	cvals->rx_max_coalesced_frames = rxcount;

	cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
	cvals->tx_max_coalesced_frames = txcount;

	cvals->use_adaptive_rx_coalesce = 0;
	cvals->use_adaptive_tx_coalesce = 0;

	cvals->pkt_rate_low = 0;
	cvals->rx_coalesce_usecs_low = 0;
	cvals->rx_max_coalesced_frames_low = 0;
	cvals->tx_coalesce_usecs_low = 0;
	cvals->tx_max_coalesced_frames_low = 0;

	/* When the packet rate is below pkt_rate_high but above
	 * pkt_rate_low (both measured in packets per second) the
	 * normal {rx,tx}_* coalescing parameters are used.
	 */

	/* When the packet rate (measured in packets per second)
	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
	 * used.
	 */
	cvals->pkt_rate_high = 0;
	cvals->rx_coalesce_usecs_high = 0;
	cvals->rx_max_coalesced_frames_high = 0;
	cvals->tx_coalesce_usecs_high = 0;
	cvals->tx_max_coalesced_frames_high = 0;

	/* How often to do adaptive coalescing packet rate sampling,
	 * measured in seconds. Must not be zero.
	 */
	cvals->rate_sample_interval = 0;

	return 0;
}

/* Change the coalescing values.
 * Both cvals->*_usecs and cvals->*_frames have to be > 0
 * in order for coalescing to be active
 */
static int gfar_scoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	/* Set up rx coalescing */
	/* As of now, we will enable/disable coalescing for all
	 * queues together in case of eTSEC2, this will be modified
	 * along with the ethtool interface
	 */
	if ((cvals->rx_coalesce_usecs == 0) ||
	    (cvals->rx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 1;
	}

	if (NULL == priv->phydev)
		return -ENODEV;

	/* Check the bounds of the values */
	if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		pr_info("Coalescing is limited to %d microseconds\n",
			GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		pr_info("Coalescing is limited to %d frames\n",
			GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rxic = mk_ic_value(
			cvals->rx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
	}

	/* Set up tx coalescing */
	if ((cvals->tx_coalesce_usecs == 0) ||
	    (cvals->tx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 1;
	}

	/* Check the bounds of the values */
	if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		pr_info("Coalescing is limited to %d microseconds\n",
			GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		pr_info("Coalescing is limited to %d frames\n",
			GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->txic = mk_ic_value(
			cvals->tx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
	}

	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	return 0;
}

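/* Usage sketch (illustrative): a request such as
 *
 *   ethtool -C eth0 rx-usecs 100 rx-frames 16
 *
 * arrives here as cvals->rx_coalesce_usecs = 100 and
 * cvals->rx_max_coalesced_frames = 16 and is packed into every rx
 * queue's rxic via mk_ic_value(16, gfar_usecs2ticks(priv, 100)).
 * Passing 0 for either value disables rx coalescing entirely.
 */
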
/* Fills in rvals with the current ring parameters. Currently,
 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
 * jumbo are ignored by the driver */
static void gfar_gringparam(struct net_device *dev,
			    struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	tx_queue = priv->tx_queue[0];
	rx_queue = priv->rx_queue[0];

	rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;

	/* Values changeable by the user. The valid values are
	 * in the range 1 to the "*_max_pending" counterpart above.
	 */
	rvals->rx_pending = rx_queue->rx_ring_size;
	rvals->rx_mini_pending = rx_queue->rx_ring_size;
	rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
	rvals->tx_pending = tx_queue->tx_ring_size;
}

/* Change the current ring parameters, stopping the controller if
 * necessary so that we don't mess things up while we're in
 * motion. We wait for the ring to be clean before reallocating
 * the rings.
 */
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0, i = 0;

	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->rx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->tx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	if (dev->flags & IFF_UP) {
		unsigned long flags;

		/* Halt TX and RX, and process the frames which
		 * have already been received
		 */
		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt(dev);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		for (i = 0; i < priv->num_rx_queues; i++)
			gfar_clean_rx_ring(priv->rx_queue[i],
					   priv->rx_queue[i]->rx_ring_size);

		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);
	}

	/* Change the size */
	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
		priv->tx_queue[i]->num_txbdfree =
			priv->tx_queue[i]->tx_ring_size;
	}

	/* Rebuild the rings with the new size */
	if (dev->flags & IFF_UP) {
		err = startup_gfar(dev);
		netif_tx_wake_all_queues(dev);
	}
	return err;
}

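/* Usage sketch (illustrative):
 *
 *   ethtool -G eth0 rx 512 tx 512
 *
 * Ring sizes must be powers of 2 and no larger than
 * GFAR_RX_MAX_RING_SIZE/GFAR_TX_MAX_RING_SIZE, so e.g. "rx 500" is
 * rejected with -EINVAL before anything is torn down, while "rx 512"
 * on a running interface halts the controller, drains the rings and
 * restarts with the new size.
 */
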
int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int err = 0, i = 0;
	netdev_features_t changed = dev->features ^ features;

	if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
		gfar_vlan_mode(dev, features);

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	if (dev->flags & IFF_UP) {
		/* Halt TX and RX, and process the frames which
		 * have already been received
		 */
		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt(dev);

		unlock_tx_qs(priv);
		unlock_rx_qs(priv);
		local_irq_restore(flags);

		for (i = 0; i < priv->num_rx_queues; i++)
			gfar_clean_rx_ring(priv->rx_queue[i],
					   priv->rx_queue[i]->rx_ring_size);

		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);

		dev->features = features;

		err = startup_gfar(dev);
		netif_tx_wake_all_queues(dev);
	}
	return err;
}

static uint32_t gfar_get_msglevel(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
{
	struct gfar_private *priv = netdev_priv(dev);

	priv->msg_enable = data;
}

#ifdef CONFIG_PM
static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
	} else {
		wol->supported = wol->wolopts = 0;
	}
}

static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    wol->wolopts != 0)
		return -EINVAL;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);

	spin_lock_irqsave(&priv->bflock, flags);
	priv->wol_en = !!device_may_wakeup(&dev->dev);
	spin_unlock_irqrestore(&priv->bflock, flags);

	return 0;
}
#endif

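/* Usage sketch (illustrative): only magic-packet wake is supported,
 * so "ethtool -s eth0 wol g" succeeds on hardware advertising
 * FSL_GIANFAR_DEV_HAS_MAGIC_PACKET, while any other wake option,
 * e.g. "wol u", is rejected with -EINVAL by the ~WAKE_MAGIC check.
 */
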
static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
{
	u32 fcr = 0x0, fpr = FPR_FILER_MASK;

	if (ethflow & RXH_L2DA) {
		fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
		      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;

		fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
		      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_VLAN) {
		fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_IP_SRC) {
		fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & (RXH_IP_DST)) {
		fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L3_PROTO) {
		fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L4_B_0_1) {
		fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L4_B_2_3) {
		fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}
}

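/* Note (illustrative): each RXH_* bit handled above appends one hash
 * rule ("AND + HASH + NOMATCH" on the corresponding property) at
 * cur_filer_idx and walks the index downwards, so e.g.
 * ethflow = RXH_IP_SRC | RXH_IP_DST consumes two filer entries
 * chained together by RQFCR_AND (L2DA consumes two on its own, for
 * the high and low halves of the MAC address).
 */
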
static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
				       u64 class)
{
	unsigned int last_rule_idx = priv->cur_filer_idx;
	unsigned int cmp_rqfpr;
	unsigned int *local_rqfpr;
	unsigned int *local_rqfcr;
	int i = 0x0, k = 0x0;
	int j = MAX_FILER_IDX, l = 0x0;
	int ret = 1;

	local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	if (!local_rqfpr || !local_rqfcr) {
		ret = 0;
		goto err;
	}

	switch (class) {
	case TCP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 |RQFPR_TCP;
		break;
	case UDP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 |RQFPR_UDP;
		break;
	case TCP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 |RQFPR_TCP;
		break;
	case UDP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP;
		break;
	default:
		pr_err("Right now this class is not supported\n");
		ret = 0;
		goto err;
	}

	for (i = 0; i < MAX_FILER_IDX + 1; i++) {
		local_rqfpr[j] = priv->ftp_rqfpr[i];
		local_rqfcr[j] = priv->ftp_rqfcr[i];
		j--;
		if ((priv->ftp_rqfcr[i] ==
		     (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
		    (priv->ftp_rqfpr[i] == cmp_rqfpr))
			break;
	}

	if (i == MAX_FILER_IDX + 1) {
		pr_err("No parse rule found, can't create hash rules\n");
		ret = 0;
		goto err;
	}

	/* If a match was found, then it begins the start of a cluster rule;
	 * if it was already programmed, we need to overwrite these rules
	 */
	for (l = i+1; l < MAX_FILER_IDX; l++) {
		if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
		    !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
			priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
					     RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
			priv->ftp_rqfpr[l] = FPR_FILER_MASK;
			gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
					 priv->ftp_rqfpr[l]);
			break;
		}

		if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
		    (priv->ftp_rqfcr[l] & RQFCR_AND))
			continue;
		else {
			local_rqfpr[j] = priv->ftp_rqfpr[l];
			local_rqfcr[j] = priv->ftp_rqfcr[l];
			j--;
		}
	}

	priv->cur_filer_idx = l - 1;

	ethflow_to_filer_rules(priv, ethflow);

	/* Write back the popped out rules again */
	for (k = j+1; k < MAX_FILER_IDX; k++) {
		priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
		priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
		gfar_write_filer(priv, priv->cur_filer_idx,
				 local_rqfcr[k], local_rqfpr[k]);
		if (!priv->cur_filer_idx)
			break;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

err:
	kfree(local_rqfcr);
	kfree(local_rqfpr);
	return ret;
}

static int gfar_set_hash_opts(struct gfar_private *priv,
			      struct ethtool_rxnfc *cmd)
{
	/* write the filer rules here */
	if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
		return -EINVAL;

	return 0;
}

static int gfar_check_filer_hardware(struct gfar_private *priv)
{
	struct gfar __iomem *regs = NULL;
	u32 i;

	regs = priv->gfargrp[0].regs;

	/* Check if we are in FIFO mode */
	i = gfar_read(&regs->ecntrl);
	i &= ECNTRL_FIFM;
	if (i == ECNTRL_FIFM) {
		netdev_notice(priv->ndev, "Interface in FIFO mode\n");
		i = gfar_read(&regs->rctrl);
		i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
		if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
			netdev_info(priv->ndev,
				    "Receive Queue Filtering enabled\n");
		} else {
			netdev_warn(priv->ndev,
				    "Receive Queue Filtering disabled\n");
			return -EOPNOTSUPP;
		}
	}
	/* Or in standard mode */
	else {
		i = gfar_read(&regs->rctrl);
		i &= RCTRL_PRSDEP_MASK;
		if (i == RCTRL_PRSDEP_MASK) {
			netdev_info(priv->ndev,
				    "Receive Queue Filtering enabled\n");
		} else {
			netdev_warn(priv->ndev,
				    "Receive Queue Filtering disabled\n");
			return -EOPNOTSUPP;
		}
	}

	/* Sets the properties for arbitrary filer rule
	 * to the first 4 Layer 4 Bytes
	 */
	regs->rbifx = 0xC0C1C2C3;
	return 0;
}

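/* Note (illustrative): per the comment above, the 0xC0C1C2C3 pattern
 * written to rbifx appears to program the four arbitrary-extraction
 * bytes used by RQFCR_PID_ARB rules, one byte field per extracted
 * byte, selecting the first four bytes following the Layer 4 header.
 */
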
static int gfar_comp_asc(const void *a, const void *b)
{
	return memcmp(a, b, 4);
}

static int gfar_comp_desc(const void *a, const void *b)
{
	return -memcmp(a, b, 4);
}

static void gfar_swap(void *a, void *b, int size)
{
	u32 *_a = a;
	u32 *_b = b;

	/* One gfar_mask_entry is four u32s wide */
	swap(_a[0], _b[0]);
	swap(_a[1], _b[1]);
	swap(_a[2], _b[2]);
	swap(_a[3], _b[3]);
}

/* Write a mask to filer cache */
static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
	tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	tab->fe[tab->index].prop = mask;
	tab->index++;
}

/* Sets parse bits (e.g. IP or TCP) */
static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
{
	gfar_set_mask(mask, tab);
	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
				   RQFCR_AND;
	tab->fe[tab->index].prop = value;
	tab->index++;
}

static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
				       struct filer_table *tab)
{
	gfar_set_mask(mask, tab);
	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
	tab->fe[tab->index].prop = value;
	tab->index++;
}

/* For setting a tuple of value and mask of type flag
 * Example:
 * IP-Src = 10.0.0.0/255.0.0.0
 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
 *
 * Ethtool gives us a value=0 and mask=~0 for a don't-care tuple;
 * for a don't-care mask it gives us a 0.
 *
 * The check if don't care and the mask adjustment if mask=0 is done for VLAN
 * and MAC stuff on an upper level (due to missing information on this level).
 * For these guys we can discard them if they are value=0 and mask=0.
 *
 * Further, all masks are one-padded for better hardware efficiency.
 */
static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
			       struct filer_table *tab)
{
	switch (flag) {
		/* 3bit */
	case RQFCR_PID_PRI:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_PRI_MASK;
		break;
		/* 8bit */
	case RQFCR_PID_L4P:
	case RQFCR_PID_TOS:
		if (!~(mask | RQFCR_PID_L4P_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_L4P_MASK;
		break;
		/* 12bit */
	case RQFCR_PID_VID:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_VID_MASK;
		break;
		/* 16bit */
	case RQFCR_PID_DPT:
	case RQFCR_PID_SPT:
	case RQFCR_PID_ETY:
		if (!~(mask | RQFCR_PID_PORT_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_PORT_MASK;
		break;
		/* 24bit */
	case RQFCR_PID_DAH:
	case RQFCR_PID_DAL:
	case RQFCR_PID_SAH:
	case RQFCR_PID_SAL:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_MAC_MASK;
		break;
		/* for all real 32bit masks */
	default:
		if (!~mask)
			return;
		if (!mask)
			mask = ~0;
		break;
	}
	gfar_set_general_attribute(value, mask, flag, tab);
}

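/* Worked example (illustrative): for IP-Src = 10.0.0.0/255.0.0.0 a
 * caller passes value = 0x0A000000, mask = 0xFF000000 and
 * flag = RQFCR_PID_SIA; that takes the default (real 32bit) branch,
 * and gfar_set_general_attribute() then emits two filer entries: a
 * mask entry carrying 0xFF000000 followed by an exact-compare entry
 * on the SIA property, glued to the next rule part with RQFCR_AND.
 */
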
/* Translates value and mask for UDP, TCP or SCTP */
static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
			      struct ethtool_tcpip4_spec *mask,
			      struct filer_table *tab)
{
	gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
	gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
	gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab);
	gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab);
	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
}

/* Translates value and mask for RAW-IP4 */
static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
			     struct ethtool_usrip4_spec *mask,
			     struct filer_table *tab)
{
	gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
	gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
	gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
	gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
			   tab);
}

/* Translates value and mask for ETHER spec */
static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
			   struct filer_table *tab)
{
	u32 upper_temp_mask = 0;
	u32 lower_temp_mask = 0;

	/* Source address */
	if (!is_broadcast_ether_addr(mask->h_source)) {
		if (is_zero_ether_addr(mask->h_source)) {
			upper_temp_mask = 0xFFFFFFFF;
			lower_temp_mask = 0xFFFFFFFF;
		} else {
			upper_temp_mask = mask->h_source[0] << 16 |
					  mask->h_source[1] << 8  |
					  mask->h_source[2];
			lower_temp_mask = mask->h_source[3] << 16 |
					  mask->h_source[4] << 8  |
					  mask->h_source[5];
		}
		/* Upper 24bit */
		gfar_set_attribute(value->h_source[0] << 16 |
				   value->h_source[1] << 8  |
				   value->h_source[2],
				   upper_temp_mask, RQFCR_PID_SAH, tab);
		/* And the same for the lower part */
		gfar_set_attribute(value->h_source[3] << 16 |
				   value->h_source[4] << 8  |
				   value->h_source[5],
				   lower_temp_mask, RQFCR_PID_SAL, tab);
	}
	/* Destination address */
	if (!is_broadcast_ether_addr(mask->h_dest)) {
		/* Special for destination is limited broadcast */
		if ((is_broadcast_ether_addr(value->h_dest) &&
		    is_zero_ether_addr(mask->h_dest))) {
			gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
		} else {
			if (is_zero_ether_addr(mask->h_dest)) {
				upper_temp_mask = 0xFFFFFFFF;
				lower_temp_mask = 0xFFFFFFFF;
			} else {
				upper_temp_mask = mask->h_dest[0] << 16 |
						  mask->h_dest[1] << 8  |
						  mask->h_dest[2];
				lower_temp_mask = mask->h_dest[3] << 16 |
						  mask->h_dest[4] << 8  |
						  mask->h_dest[5];
			}
			/* Upper 24bit */
			gfar_set_attribute(value->h_dest[0] << 16 |
					   value->h_dest[1] << 8  |
					   value->h_dest[2],
					   upper_temp_mask, RQFCR_PID_DAH, tab);
			/* And the same for the lower part */
			gfar_set_attribute(value->h_dest[3] << 16 |
					   value->h_dest[4] << 8  |
					   value->h_dest[5],
					   lower_temp_mask, RQFCR_PID_DAL, tab);
		}
	}

	gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
}

/* Convert a rule to binary filter format of gianfar */
static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
				 struct filer_table *tab)
{
	u32 vlan = 0, vlan_mask = 0;
	u32 id = 0, id_mask = 0;
	u32 cfi = 0, cfi_mask = 0;
	u32 prio = 0, prio_mask = 0;
	u32 old_index = tab->index;

	/* Check if vlan is wanted */
	if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) {
		if (!rule->m_ext.vlan_tci)
			rule->m_ext.vlan_tci = 0xFFFF;

		vlan = RQFPR_VLN;
		vlan_mask = RQFPR_VLN;

		/* Separate the fields */
		id = rule->h_ext.vlan_tci & VLAN_VID_MASK;
		id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
		cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
		cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
		prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >>
		       VLAN_PRIO_SHIFT;
		prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >>
			    VLAN_PRIO_SHIFT;

		if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
			vlan |= RQFPR_CFI;
			vlan_mask |= RQFPR_CFI;
		} else if (cfi != VLAN_TAG_PRESENT &&
			   cfi_mask == VLAN_TAG_PRESENT) {
			vlan_mask |= RQFPR_CFI;
		}
	}

	switch (rule->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
				    RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
				  &rule->m_u.tcp_ip4_spec, tab);
		break;
	case UDP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
				    RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
				  &rule->m_u.udp_ip4_spec, tab);
		break;
	case SCTP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
		gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
				  (struct ethtool_tcpip4_spec *)&rule->m_u,
				  tab);
		break;
	case IP_USER_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
				 (struct ethtool_usrip4_spec *) &rule->m_u,
				 tab);
		break;
	case ETHER_FLOW:
		if (vlan)
			gfar_set_parse_bits(vlan, vlan_mask, tab);
		gfar_set_ether((struct ethhdr *) &rule->h_u,
			       (struct ethhdr *) &rule->m_u, tab);
		break;
	default:
		return -1;
	}

	/* Set the vlan attributes in the end */
	if (vlan) {
		gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
		gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
	}

	/* If there has been nothing written till now, it must be a default */
	if (tab->index == old_index) {
		gfar_set_mask(0xFFFFFFFF, tab);
		tab->fe[tab->index].ctrl = 0x20;
		tab->fe[tab->index].prop = 0x0;
		tab->index++;
	}

	/* Remove last AND */
	tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);

	/* Specify which queue to use or to drop */
	if (rule->ring_cookie == RX_CLS_FLOW_DISC)
		tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
	else
		tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);

	/* Only big enough entries can be clustered */
	if (tab->index > (old_index + 2)) {
		tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
		tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
	}

	/* In rare cases the cache can be full while there is
	 * free space in hw
	 */
	if (tab->index > MAX_FILER_CACHE_IDX - 1)
		return -EBUSY;

	return 0;
}

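/* Usage sketch (illustrative): a rule such as
 *
 *   ethtool -N eth0 flow-type tcp4 src-ip 10.0.0.1 action 1
 *
 * reaches this function as an ethtool_rx_flow_spec; it emits the
 * IPv4+TCP parse-bit pair, value/mask pairs for each non-wildcard
 * tuple, strips the trailing RQFCR_AND, and encodes "action 1" as
 * ring_cookie << 10 in the final entry's control word (RQFCR_RJE
 * instead, if the action is to drop).
 */
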
/* Copy size filer entries */
static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
				    struct gfar_filer_entry src[0], s32 size)
{
	while (size > 0) {
		size--;
		dst[size].ctrl = src[size].ctrl;
		dst[size].prop = src[size].prop;
	}
}

/* Delete the contents of the filer-table between start and end
 * and collapse them
 */
static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
{
	int length;

	if (end > MAX_FILER_CACHE_IDX || end < begin)
		return -EINVAL;

	end++;
	length = end - begin;

	/* Copy */
	while (end < tab->index) {
		tab->fe[begin].ctrl = tab->fe[end].ctrl;
		tab->fe[begin++].prop = tab->fe[end++].prop;
	}
	/* Fill up with don't cares */
	while (begin < tab->index) {
		tab->fe[begin].ctrl = 0x60;
		tab->fe[begin].prop = 0xFFFFFFFF;
		begin++;
	}

	tab->index -= length;
	return 0;
}

/* Make space on the wanted location */
static int gfar_expand_filer_entries(u32 begin, u32 length,
				     struct filer_table *tab)
{
	if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
	    begin > MAX_FILER_CACHE_IDX)
		return -EINVAL;

	gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
				tab->index - length + 1);

	tab->index += length;

	return 0;
}

static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
{
	for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
	     start++) {
		if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
		    (RQFCR_AND | RQFCR_CLE))
			return start;
	}
	return -1;
}

static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
{
	for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
	     start++) {
		if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
		    (RQFCR_CLE))
			return start;
	}
	return -1;
}

/* Uses hardware's clustering option to reduce
 * the number of filer table entries
 */
static void gfar_cluster_filer(struct filer_table *tab)
{
	s32 i = -1, j, iend, jend;

	while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
		j = i;
		while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
			/* The cluster entries self and the previous one
			 * (a mask) must be identical!
			 */
			if (tab->fe[i].ctrl != tab->fe[j].ctrl)
				break;
			if (tab->fe[i].prop != tab->fe[j].prop)
				break;
			if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
				break;
			if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
				break;
			iend = gfar_get_next_cluster_end(i, tab);
			jend = gfar_get_next_cluster_end(j, tab);
			if (jend == -1 || iend == -1)
				break;

			/* First we make some free space, where our cluster
			 * element should be. Then we copy it there and finally
			 * delete it from its old location.
			 */
			if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
			    -EINVAL)
				break;

			gfar_copy_filer_entries(&(tab->fe[iend + 1]),
						&(tab->fe[jend + 1]), jend - j);

			if (gfar_trim_filer_entries(jend - 1,
						    jend + (jend - j),
						    tab) == -EINVAL)
				return;

			/* Mask out cluster bit */
			tab->fe[iend].ctrl &= ~(RQFCR_CLE);
		}
	}
}

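/* Note (illustrative): when two rules start with identical
 * mask-plus-cluster-start entries, the body of the second is moved up
 * behind the first and its now-redundant entries are trimmed away, so
 * both rules share a single cluster and the table shrinks by the
 * duplicated prefix.
 */
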
/* Swaps the masked bits of a1<>a2 and b1<>b2 */
static void gfar_swap_bits(struct gfar_filer_entry *a1,
			   struct gfar_filer_entry *a2,
			   struct gfar_filer_entry *b1,
			   struct gfar_filer_entry *b2, u32 mask)
{
	u32 temp[4];

	temp[0] = a1->ctrl & mask;
	temp[1] = a2->ctrl & mask;
	temp[2] = b1->ctrl & mask;
	temp[3] = b2->ctrl & mask;

	a1->ctrl &= ~mask;
	a2->ctrl &= ~mask;
	b1->ctrl &= ~mask;
	b2->ctrl &= ~mask;

	a1->ctrl |= temp[1];
	a2->ctrl |= temp[0];
	b1->ctrl |= temp[3];
	b2->ctrl |= temp[2];
}

/* Generate a list consisting of masks values with their start and
 * end of validity and block as indicator for parts belonging
 * together (glued by ANDs) in mask_table
 */
static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
				    struct filer_table *tab)
{
	u32 i, and_index = 0, block_index = 1;

	for (i = 0; i < tab->index; i++) {

		/* LSByte of control = 0 sets a mask */
		if (!(tab->fe[i].ctrl & 0xF)) {
			mask_table[and_index].mask = tab->fe[i].prop;
			mask_table[and_index].start = i;
			mask_table[and_index].block = block_index;
			if (and_index >= 1)
				mask_table[and_index - 1].end = i - 1;
			and_index++;
		}
		/* cluster starts and ends will be separated because they should
		 * hold their position
		 */
		if (tab->fe[i].ctrl & RQFCR_CLE)
			block_index++;
		/* A not set AND indicates the end of a depended block */
		if (!(tab->fe[i].ctrl & RQFCR_AND))
			block_index++;
	}

	mask_table[and_index - 1].end = i - 1;

	return and_index;
}

/* Sorts the entries of mask_table by the values of the masks.
 * Important: The 0xFF80 flags of the first and last entry of a
 * block must hold their position (which queue, CLusterEnable, ReJEct,
 * AND)
 */
static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
				 struct filer_table *temp_table, u32 and_index)
{
	/* Pointer to compare function (_asc or _desc) */
	int (*gfar_comp)(const void *, const void *);

	u32 i, size = 0, start = 0, prev = 1;
	u32 old_first, old_last, new_first, new_last;

	gfar_comp = &gfar_comp_desc;

	for (i = 0; i < and_index; i++) {
		if (prev != mask_table[i].block) {
			old_first = mask_table[start].start + 1;
			old_last = mask_table[i - 1].end;
			sort(mask_table + start, size,
			     sizeof(struct gfar_mask_entry),
			     gfar_comp, &gfar_swap);

			/* Toggle order for every block. This makes the
			 * thing more efficient!
			 */
			if (gfar_comp == gfar_comp_desc)
				gfar_comp = &gfar_comp_asc;
			else
				gfar_comp = &gfar_comp_desc;

			new_first = mask_table[start].start + 1;
			new_last = mask_table[i - 1].end;

			gfar_swap_bits(&temp_table->fe[new_first],
				       &temp_table->fe[old_first],
				       &temp_table->fe[new_last],
				       &temp_table->fe[old_last],
				       RQFCR_QUEUE | RQFCR_CLE |
				       RQFCR_RJE | RQFCR_AND);

			start = i;
			size = 0;
		}
		size++;
		prev = mask_table[i].block;
	}
}

/* Reduces the number of masks needed in the filer table to save entries
 * This is done by sorting the masks of a depended block. A depended block is
 * identified by gluing ANDs or CLE. The sorting order toggles after every
 * block. Of course entries in scope of a mask must change their location with
 * it.
 */
static int gfar_optimize_filer_masks(struct filer_table *tab)
{
	struct filer_table *temp_table;
	struct gfar_mask_entry *mask_table;

	u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
	s32 ret = 0;

	/* We need a copy of the filer table because
	 * we want to change its order
	 */
	temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
	if (temp_table == NULL)
		return -ENOMEM;

	mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
			     sizeof(struct gfar_mask_entry), GFP_KERNEL);

	if (mask_table == NULL) {
		ret = -ENOMEM;
		goto end;
	}

	and_index = gfar_generate_mask_table(mask_table, tab);

	gfar_sort_mask_table(mask_table, temp_table, and_index);

	/* Now we can copy the data from our duplicated filer table to
	 * the real one in the order the mask table says
	 */
	for (i = 0; i < and_index; i++) {
		size = mask_table[i].end - mask_table[i].start + 1;
		gfar_copy_filer_entries(&(tab->fe[j]),
					&(temp_table->fe[mask_table[i].start]),
					size);
		j += size;
	}

	/* And finally we just have to check for duplicated masks and drop the
	 * second ones
	 */
	for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
		if (tab->fe[i].ctrl == 0x80) {
			previous_mask = i++;
			break;
		}
	}
	for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
		if (tab->fe[i].ctrl == 0x80) {
			if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
				/* Two identical ones found!
				 * So drop the second one!
				 */
				gfar_trim_filer_entries(i, i, tab);
			} else
				/* Not identical! */
				previous_mask = i;
		}
	}

	kfree(mask_table);
end:
	kfree(temp_table);
	return ret;
}

/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
				  struct filer_table *tab)
{
	u32 i = 0;

	if (tab->index > MAX_FILER_IDX - 1)
		return -EBUSY;

	/* Avoid inconsistent filer table to be processed */
	lock_rx_qs(priv);

	/* Fill regular entries */
	for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
	     i++)
		gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
	/* Fill the rest with fall-throughs */
	for (; i < MAX_FILER_IDX - 1; i++)
		gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
	/* Last entry must be default accept
	 * because that's what people expect
	 */
	gfar_write_filer(priv, i, 0x20, 0x0);

	unlock_rx_qs(priv);

	return 0;
}

static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
				 struct gfar_private *priv)
{
	if (flow->flow_type & FLOW_EXT) {
		if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
			netdev_warn(priv->ndev,
				    "User-specific data not supported!\n");
		if (~flow->m_ext.vlan_etype)
			netdev_warn(priv->ndev,
				    "VLAN-etype not supported!\n");
	}
	if (flow->flow_type == IP_USER_FLOW)
		if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
			netdev_warn(priv->ndev,
				    "IP-Version differing from IPv4 not supported!\n");

	return 0;
}

static int gfar_process_filer_changes(struct gfar_private *priv)
{
	struct ethtool_flow_spec_container *j;
	struct filer_table *tab;
	s32 i = 0;
	s32 ret = 0;

	/* So index is set to zero, too! */
	tab = kzalloc(sizeof(*tab), GFP_KERNEL);
	if (tab == NULL)
		return -ENOMEM;

	/* Now convert the existing filer data from flow_spec into
	 * filer tables binary format
	 */
	list_for_each_entry(j, &priv->rx_list.list, list) {
		ret = gfar_convert_to_filer(&j->fs, tab);
		if (ret == -EBUSY) {
			netdev_err(priv->ndev,
				   "Rule not added: No free space!\n");
			goto end;
		}
		if (ret == -1) {
			netdev_err(priv->ndev,
				   "Rule not added: Unsupported Flow-type!\n");
			goto end;
		}
	}

	i = tab->index;

	/* Optimizations to save entries */
	gfar_cluster_filer(tab);
	gfar_optimize_filer_masks(tab);

	pr_debug("\n\tSummary:\n"
		 "\tData on hardware: %d\n"
		 "\tCompression rate: %d%%\n",
		 tab->index, 100 - (100 * tab->index) / i);

	/* Write everything to hardware */
	ret = gfar_write_filer_table(priv, tab);
	if (ret == -EBUSY) {
		netdev_err(priv->ndev, "Rule not added: No free space!\n");
		goto end;
	}

end:
	kfree(tab);
	return ret;
}

static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	u32 i = 0;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xFF;

	flow->m_ext.vlan_etype ^= 0xFFFF;
	flow->m_ext.vlan_tci ^= 0xFFFF;
	flow->m_ext.data[0] ^= ~0;
	flow->m_ext.data[1] ^= ~0;
}

static int gfar_add_cls(struct gfar_private *priv,
			struct ethtool_rx_flow_spec *flow)
{
	struct ethtool_flow_spec_container *temp, *comp;
	int ret = 0;

	temp = kmalloc(sizeof(*temp), GFP_KERNEL);
	if (temp == NULL)
		return -ENOMEM;
	memcpy(&temp->fs, flow, sizeof(temp->fs));

	gfar_invert_masks(&temp->fs);
	ret = gfar_check_capability(&temp->fs, priv);
	if (ret)
		goto clean_mem;
	/* Link in the new element at the right @location */
	if (list_empty(&priv->rx_list.list)) {
		ret = gfar_check_filer_hardware(priv);
		if (ret != 0)
			goto clean_mem;
		list_add(&temp->list, &priv->rx_list.list);
		goto process;
	} else {
		list_for_each_entry(comp, &priv->rx_list.list, list) {
			if (comp->fs.location > flow->location) {
				list_add_tail(&temp->list, &comp->list);
				goto process;
			}
			if (comp->fs.location == flow->location) {
				netdev_err(priv->ndev,
					   "Rule not added: ID %d not free!\n",
					   flow->location);
				ret = -EBUSY;
				goto clean_mem;
			}
		}
		list_add_tail(&temp->list, &priv->rx_list.list);
	}

process:
	ret = gfar_process_filer_changes(priv);
	if (ret)
		goto clean_list;
	priv->rx_list.count++;
	return ret;

clean_list:
	list_del(&temp->list);
clean_mem:
	kfree(temp);
	return ret;
}

static int gfar_del_cls(struct gfar_private *priv, u32 loc)
{
	struct ethtool_flow_spec_container *comp;
	u32 ret = -EINVAL;

	if (list_empty(&priv->rx_list.list))
		return ret;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == loc) {
			list_del(&comp->list);
			kfree(comp);
			priv->rx_list.count--;
			gfar_process_filer_changes(priv);
			ret = 0;
			break;
		}
	}

	return ret;
}

static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
{
	struct ethtool_flow_spec_container *comp;
	u32 ret = -EINVAL;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == cmd->fs.location) {
			memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
			gfar_invert_masks(&cmd->fs);
			ret = 0;
			break;
		}
	}

	return ret;
}

static int gfar_get_cls_all(struct gfar_private *priv,
			    struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct ethtool_flow_spec_container *comp;
	u32 i = 0;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (i == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[i] = comp->fs.location;
		i++;
	}

	cmd->data = MAX_FILER_IDX;
	cmd->rule_cnt = i;

	return 0;
}

static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct gfar_private *priv = netdev_priv(dev);
	int ret = 0;

	mutex_lock(&priv->rx_queue_access);

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = gfar_set_hash_opts(priv, cmd);
		break;
	case ETHTOOL_SRXCLSRLINS:
		if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
		     cmd->fs.ring_cookie >= priv->num_rx_queues) ||
		    cmd->fs.location >= MAX_FILER_IDX) {
			ret = -EINVAL;
			break;
		}
		ret = gfar_add_cls(priv, &cmd->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = gfar_del_cls(priv, cmd->fs.location);
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&priv->rx_queue_access);

	return ret;
}

static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			u32 *rule_locs)
{
	struct gfar_private *priv = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->num_rx_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = priv->rx_list.count;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = gfar_get_cls(priv, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = gfar_get_cls_all(priv, cmd, rule_locs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

int gfar_phc_index = -1;
EXPORT_SYMBOL(gfar_phc_index);

static int gfar_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE;
		info->phc_index = -1;
		return 0;
	}
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = gfar_phc_index;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}

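/* Usage sketch (illustrative): with FSL_GIANFAR_DEV_HAS_TIMER set,
 * "ethtool -T eth0" reports hardware TX/RX/raw timestamping together
 * with the PHC index exported above; without the timer block only
 * software RX timestamping is advertised and phc_index stays -1.
 */
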
const struct ethtool_ops gfar_ethtool_ops = {
	.get_settings = gfar_gsettings,
	.set_settings = gfar_ssettings,
	.get_drvinfo = gfar_gdrvinfo,
	.get_regs_len = gfar_reglen,
	.get_regs = gfar_get_regs,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gfar_gcoalesce,
	.set_coalesce = gfar_scoalesce,
	.get_ringparam = gfar_gringparam,
	.set_ringparam = gfar_sringparam,
	.get_strings = gfar_gstrings,
	.get_sset_count = gfar_sset_count,
	.get_ethtool_stats = gfar_fill_stats,
	.get_msglevel = gfar_get_msglevel,
	.set_msglevel = gfar_set_msglevel,
#ifdef CONFIG_PM
	.get_wol = gfar_get_wol,
	.set_wol = gfar_set_wol,
#endif
	.set_rxnfc = gfar_set_nfc,
	.get_rxnfc = gfar_get_nfc,
	.get_ts_info = gfar_get_ts_info,
};