2 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
4 * Copyright (C) 2003-2005,2008 David Brownell
5 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
6 * Copyright (C) 2008 Nokia Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
14 /* #define VERBOSE_DEBUG */
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/gfp.h>
19 #include <linux/device.h>
20 #include <linux/ctype.h>
21 #include <linux/etherdevice.h>
22 #include <linux/ethtool.h>
23 #include <linux/if_vlan.h>
29 * This component encapsulates the Ethernet link glue needed to provide
30 * one (!) network link through the USB gadget stack, normally "usb0".
32 * The control and data models are handled by the function driver which
33 * connects to this code; such as CDC Ethernet (ECM or EEM),
34 * "CDC Subset", or RNDIS. That includes all descriptor and endpoint
37 * Link level addressing is handled by this component using module
38 * parameters; if no such parameters are provided, random link level
39 * addresses are used. Each end of the link uses one address. The
40 * host end address is exported in various ways, and is often recorded
41 * in configuration databases.
43 * The driver which assembles each configuration using such a link is
44 * responsible for ensuring that each configuration includes at most one
45 * instance of this network link.  (The network layer provides ways for
46 * this single "physical" link to be used by multiple virtual links.)
49 #define UETH__VERSION "29-May-2008"
51 #define GETHER_NAPI_WEIGHT 32
54 /* lock is held while accessing port_usb
57 struct gether
*port_usb
;
59 struct net_device
*net
;
60 struct usb_gadget
*gadget
;
62 spinlock_t req_lock
; /* guard {rx,tx}_reqs */
63 struct list_head tx_reqs
, rx_reqs
;
66 struct sk_buff_head rx_frames
;
71 struct sk_buff
*(*wrap
)(struct gether
*, struct sk_buff
*skb
);
72 int (*unwrap
)(struct gether
*,
74 struct sk_buff_head
*list
);
76 struct work_struct work
;
77 struct napi_struct rx_napi
;
80 #define WORK_RX_MEMORY 0
83 u8 host_mac
[ETH_ALEN
];
87 /*-------------------------------------------------------------------------*/
89 #define RX_EXTRA 20 /* bytes guarding against rx overflows */
91 #define DEFAULT_QLEN 2 /* double buffering by default */
93 /* for dual-speed hardware, use deeper queues at high/super speed */
94 static inline int qlen(struct usb_gadget
*gadget
, unsigned qmult
)
96 if (gadget_is_dualspeed(gadget
) && (gadget
->speed
== USB_SPEED_HIGH
||
97 gadget
->speed
== USB_SPEED_SUPER
))
98 return qmult
* DEFAULT_QLEN
;
103 /*-------------------------------------------------------------------------*/
105 /* REVISIT there must be a better way than having two sets
/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)
138 /*-------------------------------------------------------------------------*/
140 /* NETWORK DRIVER HOOKUP (to the layer above this driver) */
142 static int ueth_change_mtu(struct net_device
*net
, int new_mtu
)
144 struct eth_dev
*dev
= netdev_priv(net
);
148 /* don't change MTU on "live" link (peer won't know) */
149 spin_lock_irqsave(&dev
->lock
, flags
);
152 else if (new_mtu
<= ETH_HLEN
|| new_mtu
> ETH_FRAME_LEN
)
156 spin_unlock_irqrestore(&dev
->lock
, flags
);
161 static void eth_get_drvinfo(struct net_device
*net
, struct ethtool_drvinfo
*p
)
163 struct eth_dev
*dev
= netdev_priv(net
);
165 strlcpy(p
->driver
, "g_ether", sizeof(p
->driver
));
166 strlcpy(p
->version
, UETH__VERSION
, sizeof(p
->version
));
167 strlcpy(p
->fw_version
, dev
->gadget
->name
, sizeof(p
->fw_version
));
168 strlcpy(p
->bus_info
, dev_name(&dev
->gadget
->dev
), sizeof(p
->bus_info
));
171 /* REVISIT can also support:
172 * - WOL (by tracking suspends and issuing remote wakeup)
173 * - msglevel (implies updated messaging)
174 * - ... probably more ethtool ops
177 static const struct ethtool_ops ops
= {
178 .get_drvinfo
= eth_get_drvinfo
,
179 .get_link
= ethtool_op_get_link
,
182 static void defer_kevent(struct eth_dev
*dev
, int flag
)
184 if (test_and_set_bit(flag
, &dev
->todo
))
186 if (!schedule_work(&dev
->work
))
187 ERROR(dev
, "kevent %d may have been dropped\n", flag
);
189 DBG(dev
, "kevent %d scheduled\n", flag
);
192 static void rx_complete(struct usb_ep
*ep
, struct usb_request
*req
);
195 rx_submit(struct eth_dev
*dev
, struct usb_request
*req
, gfp_t gfp_flags
)
198 int retval
= -ENOMEM
;
203 spin_lock_irqsave(&dev
->lock
, flags
);
205 out
= dev
->port_usb
->out_ep
;
208 spin_unlock_irqrestore(&dev
->lock
, flags
);
214 /* Padding up to RX_EXTRA handles minor disagreements with host.
215 * Normally we use the USB "terminate on short read" convention;
216 * so allow up to (N*maxpacket), since that memory is normally
217 * already allocated. Some hardware doesn't deal well with short
218 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
219 * byte off the end (to force hardware errors on overflow).
221 * RNDIS uses internal framing, and explicitly allows senders to
222 * pad to end-of-packet. That's potentially nice for speed, but
223 * means receivers can't recover lost synch on their own (because
224 * new packets don't only start after a short RX).
226 size
+= sizeof(struct ethhdr
) + dev
->net
->mtu
+ RX_EXTRA
;
227 size
+= dev
->port_usb
->header_len
;
228 size
+= out
->maxpacket
- 1;
229 size
-= size
% out
->maxpacket
;
231 if (dev
->port_usb
->is_fixed
)
232 size
= max_t(size_t, size
, dev
->port_usb
->fixed_out_len
);
234 skb
= alloc_skb(size
+ NET_IP_ALIGN
, gfp_flags
);
236 DBG(dev
, "no rx skb\n");
240 /* Some platforms perform better when IP packets are aligned,
241 * but on at least one, checksumming fails otherwise. Note:
242 * RNDIS headers involve variable numbers of LE32 values.
244 skb_reserve(skb
, NET_IP_ALIGN
);
246 req
->buf
= skb
->data
;
248 req
->complete
= rx_complete
;
251 retval
= usb_ep_queue(out
, req
, gfp_flags
);
252 if (retval
== -ENOMEM
)
254 defer_kevent(dev
, WORK_RX_MEMORY
);
256 DBG(dev
, "rx submit --> %d\n", retval
);
258 dev_kfree_skb_any(skb
);
263 static void rx_complete(struct usb_ep
*ep
, struct usb_request
*req
)
265 struct sk_buff
*skb
= req
->context
;
266 struct eth_dev
*dev
= ep
->driver_data
;
267 int status
= req
->status
;
272 /* normal completion */
274 skb_put(skb
, req
->actual
);
279 spin_lock_irqsave(&dev
->lock
, flags
);
281 status
= dev
->unwrap(dev
->port_usb
,
285 dev_kfree_skb_any(skb
);
288 spin_unlock_irqrestore(&dev
->lock
, flags
);
290 skb_queue_tail(&dev
->rx_frames
, skb
);
296 /* software-driven interface shutdown */
297 case -ECONNRESET
: /* unlink */
298 case -ESHUTDOWN
: /* disconnect etc */
299 VDBG(dev
, "rx shutdown, code %d\n", status
);
302 /* for hardware automagic (such as pxa) */
303 case -ECONNABORTED
: /* endpoint reset */
304 DBG(dev
, "rx %s reset\n", ep
->name
);
305 defer_kevent(dev
, WORK_RX_MEMORY
);
307 dev_kfree_skb_any(skb
);
312 dev
->net
->stats
.rx_over_errors
++;
317 dev_kfree_skb_any(skb
);
318 dev
->net
->stats
.rx_errors
++;
319 DBG(dev
, "rx status %d\n", status
);
324 spin_lock(&dev
->req_lock
);
325 list_add(&req
->list
, &dev
->rx_reqs
);
326 spin_unlock(&dev
->req_lock
);
328 if (rx_queue
&& likely(napi_schedule_prep(&dev
->rx_napi
)))
329 __napi_schedule(&dev
->rx_napi
);
332 static int prealloc(struct list_head
*list
, struct usb_ep
*ep
, unsigned n
)
335 struct usb_request
*req
;
340 /* queue/recycle up to N requests */
342 list_for_each_entry(req
, list
, list
) {
347 req
= usb_ep_alloc_request(ep
, GFP_ATOMIC
);
349 return list_empty(list
) ? -ENOMEM
: 0;
350 list_add(&req
->list
, list
);
357 struct list_head
*next
;
359 next
= req
->list
.next
;
360 list_del(&req
->list
);
361 usb_ep_free_request(ep
, req
);
366 req
= container_of(next
, struct usb_request
, list
);
371 static int alloc_requests(struct eth_dev
*dev
, struct gether
*link
, unsigned n
)
375 spin_lock(&dev
->req_lock
);
376 status
= prealloc(&dev
->tx_reqs
, link
->in_ep
, n
);
379 status
= prealloc(&dev
->rx_reqs
, link
->out_ep
, n
);
384 DBG(dev
, "can't alloc requests\n");
386 spin_unlock(&dev
->req_lock
);
390 static void rx_fill(struct eth_dev
*dev
, gfp_t gfp_flags
)
392 struct usb_request
*req
;
396 /* fill unused rxq slots with some skb */
397 spin_lock_irqsave(&dev
->req_lock
, flags
);
398 while (!list_empty(&dev
->rx_reqs
)) {
400 if (++rx_counts
> qlen(dev
->gadget
, dev
->qmult
))
403 req
= container_of(dev
->rx_reqs
.next
,
404 struct usb_request
, list
);
405 list_del_init(&req
->list
);
406 spin_unlock_irqrestore(&dev
->req_lock
, flags
);
408 if (rx_submit(dev
, req
, gfp_flags
) < 0) {
409 spin_lock_irqsave(&dev
->req_lock
, flags
);
410 list_add(&req
->list
, &dev
->rx_reqs
);
411 spin_unlock_irqrestore(&dev
->req_lock
, flags
);
412 defer_kevent(dev
, WORK_RX_MEMORY
);
416 spin_lock_irqsave(&dev
->req_lock
, flags
);
418 spin_unlock_irqrestore(&dev
->req_lock
, flags
);
421 static int gether_poll(struct napi_struct
*napi
, int budget
)
423 struct eth_dev
*dev
= container_of(napi
, struct eth_dev
, rx_napi
);
425 unsigned int work_done
= 0;
428 while ((skb
= skb_dequeue(&dev
->rx_frames
))) {
430 || ETH_HLEN
> skb
->len
431 || skb
->len
> VLAN_ETH_FRAME_LEN
) {
432 dev
->net
->stats
.rx_errors
++;
433 dev
->net
->stats
.rx_length_errors
++;
434 DBG(dev
, "rx length %d\n", skb
->len
);
435 dev_kfree_skb_any(skb
);
438 skb
->protocol
= eth_type_trans(skb
, dev
->net
);
439 dev
->net
->stats
.rx_packets
++;
440 dev
->net
->stats
.rx_bytes
+= skb
->len
;
442 status
= netif_rx_ni(skb
);
445 if (netif_running(dev
->net
)) {
446 rx_fill(dev
, GFP_KERNEL
);
450 if (work_done
< budget
)
451 napi_complete(&dev
->rx_napi
);
456 static void eth_work(struct work_struct
*work
)
458 struct eth_dev
*dev
= container_of(work
, struct eth_dev
, work
);
460 if (test_and_clear_bit(WORK_RX_MEMORY
, &dev
->todo
)) {
461 if (netif_running(dev
->net
))
462 rx_fill(dev
, GFP_KERNEL
);
466 DBG(dev
, "work done, flags = 0x%lx\n", dev
->todo
);
469 static void tx_complete(struct usb_ep
*ep
, struct usb_request
*req
)
471 struct sk_buff
*skb
= req
->context
;
472 struct eth_dev
*dev
= ep
->driver_data
;
474 switch (req
->status
) {
476 dev
->net
->stats
.tx_errors
++;
477 VDBG(dev
, "tx err %d\n", req
->status
);
479 case -ECONNRESET
: /* unlink */
480 case -ESHUTDOWN
: /* disconnect etc */
483 dev
->net
->stats
.tx_bytes
+= skb
->len
;
485 dev
->net
->stats
.tx_packets
++;
487 spin_lock(&dev
->req_lock
);
488 list_add(&req
->list
, &dev
->tx_reqs
);
489 spin_unlock(&dev
->req_lock
);
490 dev_kfree_skb_any(skb
);
492 atomic_dec(&dev
->tx_qlen
);
493 if (netif_carrier_ok(dev
->net
))
494 netif_wake_queue(dev
->net
);
497 static inline int is_promisc(u16 cdc_filter
)
499 return cdc_filter
& USB_CDC_PACKET_TYPE_PROMISCUOUS
;
502 static netdev_tx_t
eth_start_xmit(struct sk_buff
*skb
,
503 struct net_device
*net
)
505 struct eth_dev
*dev
= netdev_priv(net
);
506 int length
= skb
->len
;
508 struct usb_request
*req
= NULL
;
513 spin_lock_irqsave(&dev
->lock
, flags
);
515 in
= dev
->port_usb
->in_ep
;
516 cdc_filter
= dev
->port_usb
->cdc_filter
;
521 spin_unlock_irqrestore(&dev
->lock
, flags
);
524 dev_kfree_skb_any(skb
);
528 /* apply outgoing CDC or RNDIS filters */
529 if (!is_promisc(cdc_filter
)) {
530 u8
*dest
= skb
->data
;
532 if (is_multicast_ether_addr(dest
)) {
535 /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
536 * SET_ETHERNET_MULTICAST_FILTERS requests
538 if (is_broadcast_ether_addr(dest
))
539 type
= USB_CDC_PACKET_TYPE_BROADCAST
;
541 type
= USB_CDC_PACKET_TYPE_ALL_MULTICAST
;
542 if (!(cdc_filter
& type
)) {
543 dev_kfree_skb_any(skb
);
547 /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
550 spin_lock_irqsave(&dev
->req_lock
, flags
);
552 * this freelist can be empty if an interrupt triggered disconnect()
553 * and reconfigured the gadget (shutting down this queue) after the
554 * network stack decided to xmit but before we got the spinlock.
556 if (list_empty(&dev
->tx_reqs
)) {
557 spin_unlock_irqrestore(&dev
->req_lock
, flags
);
558 return NETDEV_TX_BUSY
;
561 req
= container_of(dev
->tx_reqs
.next
, struct usb_request
, list
);
562 list_del(&req
->list
);
564 /* temporarily stop TX queue when the freelist empties */
565 if (list_empty(&dev
->tx_reqs
))
566 netif_stop_queue(net
);
567 spin_unlock_irqrestore(&dev
->req_lock
, flags
);
569 /* no buffer copies needed, unless the network stack did it
570 * or the hardware can't use skb buffers.
571 * or there's not enough space for extra headers we need
576 spin_lock_irqsave(&dev
->lock
, flags
);
578 skb
= dev
->wrap(dev
->port_usb
, skb
);
579 spin_unlock_irqrestore(&dev
->lock
, flags
);
585 req
->buf
= skb
->data
;
587 req
->complete
= tx_complete
;
589 /* NCM requires no zlp if transfer is dwNtbInMaxSize */
590 if (dev
->port_usb
->is_fixed
&&
591 length
== dev
->port_usb
->fixed_in_len
&&
592 (length
% in
->maxpacket
) == 0)
597 /* use zlp framing on tx for strict CDC-Ether conformance,
598 * though any robust network rx path ignores extra padding.
599 * and some hardware doesn't like to write zlps.
601 if (req
->zero
&& !dev
->zlp
&& (length
% in
->maxpacket
) == 0)
604 req
->length
= length
;
606 /* throttle high/super speed IRQ rate back slightly */
607 if (gadget_is_dualspeed(dev
->gadget
))
608 req
->no_interrupt
= (dev
->gadget
->speed
== USB_SPEED_HIGH
||
609 dev
->gadget
->speed
== USB_SPEED_SUPER
)
610 ? ((atomic_read(&dev
->tx_qlen
) % dev
->qmult
) != 0)
613 retval
= usb_ep_queue(in
, req
, GFP_ATOMIC
);
616 DBG(dev
, "tx queue err %d\n", retval
);
619 net
->trans_start
= jiffies
;
620 atomic_inc(&dev
->tx_qlen
);
624 dev_kfree_skb_any(skb
);
626 dev
->net
->stats
.tx_dropped
++;
627 spin_lock_irqsave(&dev
->req_lock
, flags
);
628 if (list_empty(&dev
->tx_reqs
))
629 netif_start_queue(net
);
630 list_add(&req
->list
, &dev
->tx_reqs
);
631 spin_unlock_irqrestore(&dev
->req_lock
, flags
);
636 /*-------------------------------------------------------------------------*/
638 static void eth_start(struct eth_dev
*dev
, gfp_t gfp_flags
)
640 DBG(dev
, "%s\n", __func__
);
642 /* fill the rx queue */
643 rx_fill(dev
, gfp_flags
);
645 /* and open the tx floodgates */
646 atomic_set(&dev
->tx_qlen
, 0);
647 netif_wake_queue(dev
->net
);
648 napi_enable(&dev
->rx_napi
);
651 static int eth_open(struct net_device
*net
)
653 struct eth_dev
*dev
= netdev_priv(net
);
656 DBG(dev
, "%s\n", __func__
);
657 if (netif_carrier_ok(dev
->net
))
658 eth_start(dev
, GFP_KERNEL
);
660 spin_lock_irq(&dev
->lock
);
661 link
= dev
->port_usb
;
662 if (link
&& link
->open
)
664 spin_unlock_irq(&dev
->lock
);
669 static int eth_stop(struct net_device
*net
)
671 struct eth_dev
*dev
= netdev_priv(net
);
674 VDBG(dev
, "%s\n", __func__
);
675 napi_disable(&dev
->rx_napi
);
676 netif_stop_queue(net
);
678 DBG(dev
, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
679 dev
->net
->stats
.rx_packets
, dev
->net
->stats
.tx_packets
,
680 dev
->net
->stats
.rx_errors
, dev
->net
->stats
.tx_errors
683 /* ensure there are no more active requests */
684 spin_lock_irqsave(&dev
->lock
, flags
);
686 struct gether
*link
= dev
->port_usb
;
687 const struct usb_endpoint_descriptor
*in
;
688 const struct usb_endpoint_descriptor
*out
;
693 /* NOTE: we have no abort-queue primitive we could use
694 * to cancel all pending I/O. Instead, we disable then
695 * reenable the endpoints ... this idiom may leave toggle
696 * wrong, but that's a self-correcting error.
698 * REVISIT: we *COULD* just let the transfers complete at
699 * their own pace; the network stack can handle old packets.
700 * For the moment we leave this here, since it works.
702 in
= link
->in_ep
->desc
;
703 out
= link
->out_ep
->desc
;
704 usb_ep_disable(link
->in_ep
);
705 usb_ep_disable(link
->out_ep
);
706 if (netif_carrier_ok(net
)) {
707 DBG(dev
, "host still using in/out endpoints\n");
708 link
->in_ep
->desc
= in
;
709 link
->out_ep
->desc
= out
;
710 usb_ep_enable(link
->in_ep
);
711 usb_ep_enable(link
->out_ep
);
714 spin_unlock_irqrestore(&dev
->lock
, flags
);
719 /*-------------------------------------------------------------------------*/
721 static int get_ether_addr(const char *str
, u8
*dev_addr
)
726 for (i
= 0; i
< 6; i
++) {
729 if ((*str
== '.') || (*str
== ':'))
731 num
= hex_to_bin(*str
++) << 4;
732 num
|= hex_to_bin(*str
++);
735 if (is_valid_ether_addr(dev_addr
))
738 eth_random_addr(dev_addr
);
742 static int get_ether_addr_str(u8 dev_addr
[ETH_ALEN
], char *str
, int len
)
747 snprintf(str
, len
, "%02x:%02x:%02x:%02x:%02x:%02x",
748 dev_addr
[0], dev_addr
[1], dev_addr
[2],
749 dev_addr
[3], dev_addr
[4], dev_addr
[5]);
753 static const struct net_device_ops eth_netdev_ops
= {
754 .ndo_open
= eth_open
,
755 .ndo_stop
= eth_stop
,
756 .ndo_start_xmit
= eth_start_xmit
,
757 .ndo_change_mtu
= ueth_change_mtu
,
758 .ndo_set_mac_address
= eth_mac_addr
,
759 .ndo_validate_addr
= eth_validate_addr
,
762 static struct device_type gadget_type
= {
767 * gether_setup_name - initialize one ethernet-over-usb link
768 * @g: gadget to associated with these links
769 * @ethaddr: NULL, or a buffer in which the ethernet address of the
770 * host side of the link is recorded
771 * @netname: name for network device (for example, "usb")
774 * This sets up the single network link that may be exported by a
775 * gadget driver using this framework. The link layer addresses are
776 * set up using module parameters.
778 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
780 struct eth_dev
*gether_setup_name(struct usb_gadget
*g
,
781 const char *dev_addr
, const char *host_addr
,
782 u8 ethaddr
[ETH_ALEN
], unsigned qmult
, const char *netname
)
785 struct net_device
*net
;
788 net
= alloc_etherdev(sizeof *dev
);
790 return ERR_PTR(-ENOMEM
);
792 dev
= netdev_priv(net
);
793 netif_napi_add(net
, &dev
->rx_napi
, gether_poll
, GETHER_NAPI_WEIGHT
);
794 spin_lock_init(&dev
->lock
);
795 spin_lock_init(&dev
->req_lock
);
796 INIT_WORK(&dev
->work
, eth_work
);
797 INIT_LIST_HEAD(&dev
->tx_reqs
);
798 INIT_LIST_HEAD(&dev
->rx_reqs
);
800 skb_queue_head_init(&dev
->rx_frames
);
802 /* network device setup */
805 snprintf(net
->name
, sizeof(net
->name
), "%s%%d", netname
);
807 if (get_ether_addr(dev_addr
, net
->dev_addr
))
809 "using random %s ethernet address\n", "self");
810 if (get_ether_addr(host_addr
, dev
->host_mac
))
812 "using random %s ethernet address\n", "host");
815 memcpy(ethaddr
, dev
->host_mac
, ETH_ALEN
);
817 net
->netdev_ops
= ð_netdev_ops
;
819 SET_ETHTOOL_OPS(net
, &ops
);
822 SET_NETDEV_DEV(net
, &g
->dev
);
823 SET_NETDEV_DEVTYPE(net
, &gadget_type
);
825 status
= register_netdev(net
);
827 dev_dbg(&g
->dev
, "register_netdev failed, %d\n", status
);
829 dev
= ERR_PTR(status
);
831 INFO(dev
, "MAC %pM\n", net
->dev_addr
);
832 INFO(dev
, "HOST MAC %pM\n", dev
->host_mac
);
835 * two kinds of host-initiated state changes:
836 * - iff DATA transfer is active, carrier is "on"
837 * - tx queueing enabled if open *and* carrier is "on"
839 netif_carrier_off(net
);
844 EXPORT_SYMBOL(gether_setup_name
);
846 struct net_device
*gether_setup_name_default(const char *netname
)
848 struct net_device
*net
;
851 net
= alloc_etherdev(sizeof(*dev
));
853 return ERR_PTR(-ENOMEM
);
855 dev
= netdev_priv(net
);
856 netif_napi_add(net
, &dev
->rx_napi
, gether_poll
, GETHER_NAPI_WEIGHT
);
857 spin_lock_init(&dev
->lock
);
858 spin_lock_init(&dev
->req_lock
);
859 INIT_WORK(&dev
->work
, eth_work
);
860 INIT_LIST_HEAD(&dev
->tx_reqs
);
861 INIT_LIST_HEAD(&dev
->rx_reqs
);
863 skb_queue_head_init(&dev
->rx_frames
);
865 /* network device setup */
867 dev
->qmult
= QMULT_DEFAULT
;
868 snprintf(net
->name
, sizeof(net
->name
), "%s%%d", netname
);
870 eth_random_addr(dev
->dev_mac
);
871 pr_warn("using random %s ethernet address\n", "self");
872 eth_random_addr(dev
->host_mac
);
873 pr_warn("using random %s ethernet address\n", "host");
875 net
->netdev_ops
= ð_netdev_ops
;
877 SET_ETHTOOL_OPS(net
, &ops
);
878 SET_NETDEV_DEVTYPE(net
, &gadget_type
);
882 EXPORT_SYMBOL(gether_setup_name_default
);
884 int gether_register_netdev(struct net_device
*net
)
887 struct usb_gadget
*g
;
891 if (!net
->dev
.parent
)
893 dev
= netdev_priv(net
);
895 status
= register_netdev(net
);
897 dev_dbg(&g
->dev
, "register_netdev failed, %d\n", status
);
900 INFO(dev
, "HOST MAC %pM\n", dev
->host_mac
);
902 /* two kinds of host-initiated state changes:
903 * - iff DATA transfer is active, carrier is "on"
904 * - tx queueing enabled if open *and* carrier is "on"
906 netif_carrier_off(net
);
908 sa
.sa_family
= net
->type
;
909 memcpy(sa
.sa_data
, dev
->dev_mac
, ETH_ALEN
);
911 status
= dev_set_mac_address(net
, &sa
);
914 pr_warn("cannot set self ethernet address: %d\n", status
);
916 INFO(dev
, "MAC %pM\n", dev
->dev_mac
);
920 EXPORT_SYMBOL(gether_register_netdev
);
922 void gether_set_gadget(struct net_device
*net
, struct usb_gadget
*g
)
926 dev
= netdev_priv(net
);
928 SET_NETDEV_DEV(net
, &g
->dev
);
930 EXPORT_SYMBOL(gether_set_gadget
);
932 int gether_set_dev_addr(struct net_device
*net
, const char *dev_addr
)
935 u8 new_addr
[ETH_ALEN
];
937 dev
= netdev_priv(net
);
938 if (get_ether_addr(dev_addr
, new_addr
))
940 memcpy(dev
->dev_mac
, new_addr
, ETH_ALEN
);
943 EXPORT_SYMBOL(gether_set_dev_addr
);
945 int gether_get_dev_addr(struct net_device
*net
, char *dev_addr
, int len
)
949 dev
= netdev_priv(net
);
950 return get_ether_addr_str(dev
->dev_mac
, dev_addr
, len
);
952 EXPORT_SYMBOL(gether_get_dev_addr
);
954 int gether_set_host_addr(struct net_device
*net
, const char *host_addr
)
957 u8 new_addr
[ETH_ALEN
];
959 dev
= netdev_priv(net
);
960 if (get_ether_addr(host_addr
, new_addr
))
962 memcpy(dev
->host_mac
, new_addr
, ETH_ALEN
);
965 EXPORT_SYMBOL(gether_set_host_addr
);
967 int gether_get_host_addr(struct net_device
*net
, char *host_addr
, int len
)
971 dev
= netdev_priv(net
);
972 return get_ether_addr_str(dev
->host_mac
, host_addr
, len
);
974 EXPORT_SYMBOL(gether_get_host_addr
);
976 int gether_get_host_addr_cdc(struct net_device
*net
, char *host_addr
, int len
)
983 dev
= netdev_priv(net
);
984 snprintf(host_addr
, len
, "%pm", dev
->host_mac
);
986 return strlen(host_addr
);
988 EXPORT_SYMBOL(gether_get_host_addr_cdc
);
990 void gether_get_host_addr_u8(struct net_device
*net
, u8 host_mac
[ETH_ALEN
])
994 dev
= netdev_priv(net
);
995 memcpy(host_mac
, dev
->host_mac
, ETH_ALEN
);
997 EXPORT_SYMBOL(gether_get_host_addr_u8
);
999 void gether_set_qmult(struct net_device
*net
, unsigned qmult
)
1001 struct eth_dev
*dev
;
1003 dev
= netdev_priv(net
);
1006 EXPORT_SYMBOL(gether_set_qmult
);
1008 unsigned gether_get_qmult(struct net_device
*net
)
1010 struct eth_dev
*dev
;
1012 dev
= netdev_priv(net
);
1015 EXPORT_SYMBOL(gether_get_qmult
);
/* gether_get_ifname - copy the interface name (e.g. "usb0") into @name. */
int gether_get_ifname(struct net_device *net, char *name, int len)
{
	rtnl_lock();
	strlcpy(name, netdev_name(net), len);
	rtnl_unlock();
	return strlen(name);
}
EXPORT_SYMBOL(gether_get_ifname);
1027 * gether_cleanup - remove Ethernet-over-USB device
1028 * Context: may sleep
1030 * This is called to free all resources allocated by @gether_setup().
1032 void gether_cleanup(struct eth_dev
*dev
)
1037 unregister_netdev(dev
->net
);
1038 flush_work(&dev
->work
);
1039 free_netdev(dev
->net
);
1041 EXPORT_SYMBOL(gether_cleanup
);
1044 * gether_connect - notify network layer that USB link is active
1045 * @link: the USB link, set up with endpoints, descriptors matching
1046 * current device speed, and any framing wrapper(s) set up.
1047 * Context: irqs blocked
1049 * This is called to activate endpoints and let the network layer know
1050 * the connection is active ("carrier detect"). It may cause the I/O
1051 * queues to open and start letting network packets flow, but will in
1052 * any case activate the endpoints so that they respond properly to the
1055 * Verify net_device pointer returned using IS_ERR(). If it doesn't
1056 * indicate some error code (negative errno), ep->driver_data values
1057 * have been overwritten.
1059 struct net_device
*gether_connect(struct gether
*link
)
1061 struct eth_dev
*dev
= link
->ioport
;
1065 return ERR_PTR(-EINVAL
);
1067 link
->in_ep
->driver_data
= dev
;
1068 result
= usb_ep_enable(link
->in_ep
);
1070 DBG(dev
, "enable %s --> %d\n",
1071 link
->in_ep
->name
, result
);
1075 link
->out_ep
->driver_data
= dev
;
1076 result
= usb_ep_enable(link
->out_ep
);
1078 DBG(dev
, "enable %s --> %d\n",
1079 link
->out_ep
->name
, result
);
1084 result
= alloc_requests(dev
, link
, qlen(dev
->gadget
,
1088 dev
->zlp
= link
->is_zlp_ok
;
1089 DBG(dev
, "qlen %d\n", qlen(dev
->gadget
, dev
->qmult
));
1091 dev
->header_len
= link
->header_len
;
1092 dev
->unwrap
= link
->unwrap
;
1093 dev
->wrap
= link
->wrap
;
1095 spin_lock(&dev
->lock
);
1096 dev
->port_usb
= link
;
1097 if (netif_running(dev
->net
)) {
1104 spin_unlock(&dev
->lock
);
1106 netif_carrier_on(dev
->net
);
1107 if (netif_running(dev
->net
))
1108 eth_start(dev
, GFP_ATOMIC
);
1110 /* on error, disable any endpoints */
1112 (void) usb_ep_disable(link
->out_ep
);
1114 (void) usb_ep_disable(link
->in_ep
);
1117 /* caller is responsible for cleanup on error */
1119 return ERR_PTR(result
);
1122 EXPORT_SYMBOL(gether_connect
);
1125 * gether_disconnect - notify network layer that USB link is inactive
1126 * @link: the USB link, on which gether_connect() was called
1127 * Context: irqs blocked
1129 * This is called to deactivate endpoints and let the network layer know
1130 * the connection went inactive ("no carrier").
1132 * On return, the state is as if gether_connect() had never been called.
1133 * The endpoints are inactive, and accordingly without active USB I/O.
1134 * Pointers to endpoint descriptors and endpoint private data are nulled.
1136 void gether_disconnect(struct gether
*link
)
1138 struct eth_dev
*dev
= link
->ioport
;
1139 struct usb_request
*req
;
1140 struct sk_buff
*skb
;
1146 DBG(dev
, "%s\n", __func__
);
1148 netif_stop_queue(dev
->net
);
1149 netif_carrier_off(dev
->net
);
1151 /* disable endpoints, forcing (synchronous) completion
1152 * of all pending i/o. then free the request objects
1153 * and forget about the endpoints.
1155 usb_ep_disable(link
->in_ep
);
1156 spin_lock(&dev
->req_lock
);
1157 while (!list_empty(&dev
->tx_reqs
)) {
1158 req
= container_of(dev
->tx_reqs
.next
,
1159 struct usb_request
, list
);
1160 list_del(&req
->list
);
1162 spin_unlock(&dev
->req_lock
);
1163 usb_ep_free_request(link
->in_ep
, req
);
1164 spin_lock(&dev
->req_lock
);
1166 spin_unlock(&dev
->req_lock
);
1168 spin_lock(&dev
->rx_frames
.lock
);
1169 while ((skb
= __skb_dequeue(&dev
->rx_frames
)))
1170 dev_kfree_skb_any(skb
);
1171 spin_unlock(&dev
->rx_frames
.lock
);
1173 link
->in_ep
->driver_data
= NULL
;
1174 link
->in_ep
->desc
= NULL
;
1176 usb_ep_disable(link
->out_ep
);
1177 spin_lock(&dev
->req_lock
);
1178 while (!list_empty(&dev
->rx_reqs
)) {
1179 req
= container_of(dev
->rx_reqs
.next
,
1180 struct usb_request
, list
);
1181 list_del(&req
->list
);
1183 spin_unlock(&dev
->req_lock
);
1184 usb_ep_free_request(link
->out_ep
, req
);
1185 spin_lock(&dev
->req_lock
);
1187 spin_unlock(&dev
->req_lock
);
1188 link
->out_ep
->driver_data
= NULL
;
1189 link
->out_ep
->desc
= NULL
;
1191 /* finish forgetting about this USB link episode */
1192 dev
->header_len
= 0;
1196 spin_lock(&dev
->lock
);
1197 dev
->port_usb
= NULL
;
1198 spin_unlock(&dev
->lock
);
1200 EXPORT_SYMBOL(gether_disconnect
);
1202 MODULE_LICENSE("GPL");
1203 MODULE_AUTHOR("David Brownell");