/*********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
*********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include <asm/atomic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"

#include "cvmx-wqe.h"
#include "cvmx-fau.h"
#include "cvmx-pko.h"
#include "cvmx-helper.h"

#include "cvmx-gmxx-defs.h"
/*
 * You can define GET_SKBUFF_QOS() to override how the skbuff output
 * function determines which output queue is used. The default
 * implementation always uses the base queue for the port. If, for
 * example, you wanted to use the skb->priority field, define
 * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
 */
#ifndef GET_SKBUFF_QOS
#define GET_SKBUFF_QOS(skb) 0
#endif
/**
 * Packet transmit
 *
 * @skb:    Packet to send
 * @dev:    Device info structure
 *
 * Returns: always zero.
 */
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
	cvmx_pko_command_word0_t pko_command;
	union cvmx_buf_ptr hw_buffer;
	uint64_t old_scratch;
	uint64_t old_scratch2;
	int dropped;
	int qos;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int32_t in_use;
	int32_t buffers_to_free;
#if REUSE_SKBUFFS_WITHOUT_FREE
	unsigned char *fpa_head;
#endif
	/*
	 * Prefetch the private data structure.  It is larger than one
	 * cache line.
	 */
	prefetch(priv);

	/* Start off assuming no drop */
	dropped = 0;
	/*
	 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
	 * completely remove "qos" in the event neither interface
	 * supports multiple queues per port.
	 */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_SKBUFF_QOS(skb);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;
	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);

		/*
		 * Assume we're going to be able to send this
		 * packet. Fetch and increment the number of pending
		 * packets for output.
		 */
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
					       FAU_NUM_PACKET_BUFFERS_TO_FREE,
					       0);
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
					       priv->fau + qos * 4, 1);
	}
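	/*
	 * Note: the IOBDMA fetch-and-adds above complete
	 * asynchronously; their results land in the two scratchpad
	 * words saved above and are only read back after the
	 * CVMX_SYNCIOBDMA barrier further down.
	 */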
	/*
	 * The CN3XXX series of parts has an errata (GMX-401) which
	 * causes the GMX block to hang if a collision occurs towards
	 * the end of a <68 byte packet. As a workaround for this, we
	 * pad packets to be 68 bytes whenever we are in half duplex
	 * mode. We don't handle the case of having a small packet but
	 * no room to add the padding.  The kernel should always give
	 * us at least a cache line.
	 */
	if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packet in half duplex mode */
			gmx_prt_cfg.u64 =
			    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				int add_bytes = 64 - skb->len;
				if ((skb_tail_pointer(skb) + add_bytes) <=
				    skb_end_pointer(skb))
					memset(__skb_put(skb, add_bytes), 0,
					       add_bytes);
			}
		}
	}
	/* Build the PKO buffer pointer */
	hw_buffer.u64 = 0;
	hw_buffer.s.addr = cvmx_ptr_to_phys(skb->data);
	hw_buffer.s.pool = 0;
	hw_buffer.s.size =
	    (unsigned long)skb_end_pointer(skb) - (unsigned long)skb->head;
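	/*
	 * Note: size is the full capacity of the skb data buffer
	 * (head to end), not skb->len; PKO sends only total_bytes,
	 * but the pool presumably needs the real buffer size in case
	 * the buffer is handed to the FPA below.
	 */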
	/* Build the PKO command */
	pko_command.u64 = 0;
	pko_command.s.n2 = 1;	/* Don't pollute L2 with the outgoing packet */
	pko_command.s.segs = 1;
	pko_command.s.total_bytes = skb->len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	pko_command.s.dontfree = 1;
	pko_command.s.reg0 = priv->fau + qos * 4;
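	/*
	 * Note: reg0/size0/subone0 request that PKO subtract one from
	 * the 32-bit FAU counter at priv->fau + qos * 4 once the
	 * packet is done; that counter is the "in use by hardware"
	 * count read back as in_use below.
	 */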
	/*
	 * See if we can put this skb in the FPA pool. Any strange
	 * behavior from the Linux networking stack will most likely
	 * be caused by a bug in the following code. If some field is
	 * in use by the network stack and gets carried over when a
	 * buffer is reused, bad things may happen.  If in doubt and
	 * you don't need the absolute best performance, disable the
	 * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
	 * shown a 25% increase in performance under some loads.
	 */
#if REUSE_SKBUFFS_WITHOUT_FREE
	fpa_head = skb->head + 128 - ((unsigned long)skb->head & 0x7f);
	if (unlikely(skb->data < fpa_head)) {
		/*
		 * printk("TX buffer beginning can't meet FPA
		 * alignment constraints\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
		/*
		   printk("TX buffer isn't large enough for the FPA\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shared(skb))) {
		/*
		   printk("TX buffer sharing data with someone else\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_cloned(skb))) {
		/*
		   printk("TX buffer has been cloned\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_header_cloned(skb))) {
		/*
		   printk("TX buffer header has been cloned\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb->destructor)) {
		/*
		   printk("TX buffer has a destructor\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shinfo(skb)->nr_frags)) {
		/*
		   printk("TX buffer has fragments\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    (skb->truesize !=
	     sizeof(*skb) + skb_end_pointer(skb) - skb->head)) {
		/*
		   printk("TX buffer truesize has been changed\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	/*
	 * We can use this buffer in the FPA.  We don't need the FAU
	 * update anymore.
	 */
	pko_command.s.reg0 = 0;
	pko_command.s.dontfree = 0;

	hw_buffer.s.back = (skb->data - fpa_head) >> 7;
	*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
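	/*
	 * Note: the store above stashes the sk_buff pointer in the
	 * 8 bytes just below the aligned FPA buffer, presumably so
	 * the receive path can recover and recycle the skb when the
	 * hardware hands this buffer back.
	 */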
	/*
	 * The skbuff will be reused without ever being freed. We must
	 * cleanup a bunch of Linux stuff.
	 */
	dst_release(skb->dst);
	skb->dst = NULL;
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
	skb->sp = NULL;
#endif
	nf_reset(skb);

#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif /* CONFIG_NET_CLS_ACT */
#endif /* CONFIG_NET_SCHED */

dont_put_skbuff_in_hw:
#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
	/* Check if we can use the hardware checksumming */
	if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
	    ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14))
	    && ((ip_hdr(skb)->protocol == IP_PROTOCOL_TCP)
		|| (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP))) {
		/* Use hardware checksum calc */
		pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
	}
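	/*
	 * Note: ipoffp1 is a one-based offset to the IP header, so
	 * sizeof(struct ethhdr) + 1 = 15 points just past the 14-byte
	 * Ethernet header; leaving it zero disables the hardware
	 * checksum.
	 */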
	if (USE_ASYNC_IOBDMA) {
		/* Get the number of skbuffs in use by the hardware */
		CVMX_SYNCIOBDMA;
		in_use = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
	} else {
		/* Get the number of skbuffs in use by the hardware */
		in_use = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 1);
		buffers_to_free =
		    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
	}
	/*
	 * If we're sending faster than the receive can free them then
	 * don't do the HW free.
	 */
	if ((buffers_to_free < -100) && !pko_command.s.dontfree) {
		pko_command.s.dontfree = 1;
		pko_command.s.reg0 = priv->fau + qos * 4;
	}
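	/*
	 * Note: FAU_NUM_PACKET_BUFFERS_TO_FREE drifts negative when
	 * transmits outpace the cleanup that replenishes it; -100 is
	 * a heuristic threshold for switching from hardware freeing
	 * back to queuing skbs on tx_free_list instead.
	 */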
	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
				     CVMX_PKO_LOCK_CMD_QUEUE);

	/* Drop this packet if we have too many already queued to the HW */
	if (unlikely
	    (skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
		/*
		   DEBUGPRINT("%s: Tx dropped. Too many queued\n", dev->name);
		 */
		dropped = 1;
	}
	/* Send the packet to the output queue */
	else if (unlikely
		 (cvmx_pko_send_packet_finish
		  (priv->port, priv->queue + qos, pko_command, hw_buffer,
		   CVMX_PKO_LOCK_CMD_QUEUE))) {
		DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
		dropped = 1;
	}
	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
		cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
	}
	if (unlikely(dropped)) {
		dev_kfree_skb_any(skb);
		cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
		priv->stats.tx_dropped++;
	} else {
		if (USE_SKBUFFS_IN_HW) {
			/* Put this packet on the queue to be freed later */
			if (pko_command.s.dontfree)
				skb_queue_tail(&priv->tx_free_list[qos], skb);
			else {
				cvmx_fau_atomic_add32
				    (FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
				cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
			}
		} else {
			/* Put this packet on the queue to be freed later */
			skb_queue_tail(&priv->tx_free_list[qos], skb);
		}
	}
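	/*
	 * Note: each transmit queues at most one skb but the loop
	 * below frees up to two, so the free list drains faster than
	 * it grows without unbounded work in the hot path.
	 */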
	/* Free skbuffs not in use by the hardware, possibly two at a time */
	if (skb_queue_len(&priv->tx_free_list[qos]) > in_use) {
		spin_lock(&priv->tx_free_list[qos].lock);
		/*
		 * Check again now that we have the lock. It might
		 * have changed.
		 */
		if (skb_queue_len(&priv->tx_free_list[qos]) > in_use)
			dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
		if (skb_queue_len(&priv->tx_free_list[qos]) > in_use)
			dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
		spin_unlock(&priv->tx_free_list[qos].lock);
	}

	return 0;
}
/**
 * Packet transmit to the POW
 *
 * @skb:    Packet to send
 * @dev:    Device info structure
 *
 * Returns: always zero.
 */
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	void *packet_buffer;
	void *copy_location;
	/* Get a work queue entry */
	cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
	if (unlikely(work == NULL)) {
		DEBUGPRINT("%s: Failed to allocate a work queue entry\n",
			   dev->name);
		priv->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}
	/* Get a packet buffer */
	packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
	if (unlikely(packet_buffer == NULL)) {
		DEBUGPRINT("%s: Failed to allocate a packet buffer\n",
			   dev->name);
		cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
		priv->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}
	/*
	 * Calculate where we need to copy the data to. We need to
	 * leave 8 bytes for a next pointer (unused). We also need to
	 * include any configured skip. Then we need to align the IP
	 * packet src and dest into the same 64bit word. The below
	 * calculation may add a little extra, but that doesn't
	 * hurt.
	 */
	copy_location = packet_buffer + sizeof(uint64_t);
	copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;
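	/*
	 * Worked example: packet_buffer is 128-byte aligned, so after
	 * the 8-byte next pointer and the skip rounded up to 8 bytes,
	 * copy_location is 6 (mod 8). The 14-byte Ethernet header then
	 * puts the IP header at 4 (mod 8), which places the source and
	 * destination addresses (IP header bytes 12-19) in a single
	 * 64-bit word.
	 */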
	/*
	 * We have to copy the packet since whoever processes this
	 * packet will free it to a hardware pool. We can't use the
	 * trick of counting outstanding packets like in
	 * cvm_oct_xmit.
	 */
	memcpy(copy_location, skb->data, skb->len);
	/*
	 * Fill in some of the work queue fields. We may need to add
	 * more if the software at the other end needs them.
	 */
	work->hw_chksum = skb->csum;
	work->len = skb->len;
	work->ipprt = priv->port;
	work->qos = priv->port & 0x7;
	work->grp = pow_send_group;
	work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
	work->tag = pow_send_group;	/* FIXME */
	/* Default to zero; the explicit zero assignments below are
	   commented out */
	work->word2.u64 = 0;
	work->word2.s.bufs = 1;
	work->packet_ptr.u64 = 0;
	work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
	work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
	work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
	work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;
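	/*
	 * Note: back counts 128-byte cache lines from the packet data
	 * back to the start of the FPA buffer (hence the >> 7); the
	 * hardware uses it to locate the buffer when freeing it to
	 * the pool.
	 */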
	if (skb->protocol == htons(ETH_P_IP)) {
		work->word2.s.ip_offset = 14;
#if 0
		work->word2.s.vlan_valid = 0;	/* FIXME */
		work->word2.s.vlan_cfi = 0;	/* FIXME */
		work->word2.s.vlan_id = 0;	/* FIXME */
		work->word2.s.dec_ipcomp = 0;	/* FIXME */
#endif
		work->word2.s.tcp_or_udp =
		    (ip_hdr(skb)->protocol == IP_PROTOCOL_TCP)
		    || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP);
#if 0
		/* No error, packet is internal */
		work->word2.s.dec_ipsec = 0;
		/* We only support IPv4 right now */
		work->word2.s.is_v6 = 0;
		/* Hardware would set to zero */
		work->word2.s.software = 0;
		/* No error, packet is internal */
		work->word2.s.L4_error = 0;
#endif
		work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0)
					  || (ip_hdr(skb)->frag_off ==
					      1 << 14));
#if 0
		/* Assume Linux is sending a good packet */
		work->word2.s.IP_exc = 0;
#endif
		work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
		work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
#if 0
		/* This is an IP packet */
		work->word2.s.not_IP = 0;
		/* No error, packet is internal */
		work->word2.s.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.s.err_code = 0;
#endif

		/*
		 * When copying the data, include 4 bytes of the
		 * ethernet header to align the same way hardware
		 * does.
		 */
		memcpy(work->packet_data, skb->data + 10,
		       sizeof(work->packet_data));
	} else {
#if 0
		work->word2.snoip.vlan_valid = 0;	/* FIXME */
		work->word2.snoip.vlan_cfi = 0;	/* FIXME */
		work->word2.snoip.vlan_id = 0;	/* FIXME */
		work->word2.snoip.software = 0;	/* Hardware would set to zero */
#endif
		work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
		work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
		work->word2.snoip.is_bcast =
		    (skb->pkt_type == PACKET_BROADCAST);
		work->word2.snoip.is_mcast =
		    (skb->pkt_type == PACKET_MULTICAST);
		work->word2.snoip.not_IP = 1;	/* IP was done up above */
#if 0
		/* No error, packet is internal */
		work->word2.snoip.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.snoip.err_code = 0;
#endif
		memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
	}
	/* Submit the packet to the POW */
	cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos,
			     work->grp);
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return 0;
}
/**
 * Transmit a work queue entry out of the ethernet port. Both
 * the work queue entry and the packet data can optionally be
 * freed. The work will be freed on error as well.
 *
 * @dev:     Device to transmit out.
 * @work_queue_entry:
 *           Work queue entry to send
 * @do_free: True if the work queue entry and packet data should be
 *           freed. If false, neither will be freed.
 * @qos:     Index into the queues for this port to transmit on. This
 *           is used to implement QoS if there are multiple queues per
 *           port. This parameter must be between 0 and the number of
 *           queues per port minus 1. Values outside of this range will
 *           be changed to zero.
 *
 * Returns: zero on success, negative on failure.
 */
int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
			 int do_free, int qos)
{
	unsigned long flags;
	union cvmx_buf_ptr hw_buffer;
	cvmx_pko_command_word0_t pko_command;
	int dropped;
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_wqe_t *work = work_queue_entry;

	if (!(dev->flags & IFF_UP)) {
		DEBUGPRINT("%s: Device not up\n", dev->name);
		if (do_free)
			cvm_oct_free_work(work);
		return -1;
	}
	/* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely
	   remove "qos" in the event neither interface supports
	   multiple queues per port */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;

	/* Start off assuming no drop */
	dropped = 0;

	local_irq_save(flags);
	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
				     CVMX_PKO_LOCK_CMD_QUEUE);
	/* Build the PKO buffer pointer */
	hw_buffer.u64 = 0;
	hw_buffer.s.addr = work->packet_ptr.s.addr;
	hw_buffer.s.pool = CVMX_FPA_PACKET_POOL;
	hw_buffer.s.size = CVMX_FPA_PACKET_POOL_SIZE;
	hw_buffer.s.back = work->packet_ptr.s.back;
	/* Build the PKO command */
	pko_command.u64 = 0;
	pko_command.s.n2 = 1;	/* Don't pollute L2 with the outgoing packet */
	pko_command.s.dontfree = !do_free;
	pko_command.s.segs = work->word2.s.bufs;
	pko_command.s.total_bytes = work->len;
	/* Check if we can use the hardware checksumming */
	if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc))
		pko_command.s.ipoffp1 = 0;
	else
		pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
	/* Send the packet to the output queue */
	if (unlikely
	    (cvmx_pko_send_packet_finish
	     (priv->port, priv->queue + qos, pko_command, hw_buffer,
	      CVMX_PKO_LOCK_CMD_QUEUE))) {
		DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
		dropped = -1;
	}
	local_irq_restore(flags);
	if (unlikely(dropped)) {
		if (do_free)
			cvm_oct_free_work(work);
		priv->stats.tx_dropped++;
	} else if (do_free)
		cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));

	return dropped;
}
EXPORT_SYMBOL(cvm_oct_transmit_qos);
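/*
 * Illustrative usage sketch (not part of the original file): a caller
 * forwarding a received work queue entry out of a port, letting the
 * hardware free the WQE and packet data on success, might look like
 * the hypothetical helper below.
 *
 *	static void example_forward(struct net_device *dev, cvmx_wqe_t *work)
 *	{
 *		// do_free = 1: WQE and packet data are freed for us
 *		// qos = 0: use the port's base queue
 *		if (cvm_oct_transmit_qos(dev, work, 1, 0) < 0)
 *			printk(KERN_DEBUG "example_forward: send failed\n");
 *	}
 */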
/**
 * This function frees all skbs that are currently queued for TX.
 *
 * @dev:    Device being shutdown
 */
void cvm_oct_tx_shutdown(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	unsigned long flags;
	int qos;

	for (qos = 0; qos < 16; qos++) {
		spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
		while (skb_queue_len(&priv->tx_free_list[qos]))
			dev_kfree_skb_any(__skb_dequeue
					  (&priv->tx_free_list[qos]));
		spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
	}
}