/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */
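
/*
 * Typical use by a function driver (rough sketch only, not a complete
 * implementation; names such as "link", "ethaddr" and the composite
 * plumbing around them are illustrative):
 *
 *	struct eth_dev *dev;
 *	struct gether link;		// embedded in the function's state
 *	u8 ethaddr[ETH_ALEN];
 *
 *	// at bind time: create and register the network device
 *	dev = gether_setup_name(gadget, NULL, NULL, ethaddr,
 *				QMULT_DEFAULT, "usb");
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *	link.ioport = dev;
 *
 *	// when the host selects the data interface: start the link
 *	net = gether_connect(&link);
 *
 *	// on disconnect or altsetting 0: stop it again
 *	gether_disconnect(&link);
 *
 *	// at unbind time: tear everything down
 *	gether_cleanup(dev);
 *
 * The endpoint pointers, descriptors, and any wrap/unwrap framing hooks
 * in the struct gether must be filled in by the function driver before
 * it calls gether_connect().
 */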

#define UETH__VERSION "29-May-2008"

#define GETHER_NAPI_WEIGHT 32

struct eth_dev {
        /* lock is held while accessing port_usb
         */
        spinlock_t lock;
        struct gether *port_usb;

        struct net_device *net;
        struct usb_gadget *gadget;

        spinlock_t req_lock;    /* guard {rx,tx}_reqs */
        struct list_head tx_reqs, rx_reqs;
        atomic_t tx_qlen;

        struct sk_buff_head rx_frames;

        unsigned qmult;

        unsigned header_len;
        struct sk_buff *(*wrap)(struct gether *, struct sk_buff *skb);
        int (*unwrap)(struct gether *,
                        struct sk_buff *skb,
                        struct sk_buff_head *list);

        struct work_struct work;
        struct napi_struct rx_napi;

        unsigned long todo;
#define WORK_RX_MEMORY 0

        bool zlp;
        u8 host_mac[ETH_ALEN];
        u8 dev_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA        20      /* bytes guarding against rx overflows */

#define DEFAULT_QLEN    2       /* double buffering by default */

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
        if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
                                            gadget->speed == USB_SPEED_SUPER))
                return qmult * DEFAULT_QLEN;
        else
                return DEFAULT_QLEN;
}
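
/* Example: with DEFAULT_QLEN 2 and the usual module default of qmult = 5
 * (QMULT_DEFAULT in u_ether.h), a high or super speed link keeps up to
 * 10 requests queued per direction, while full speed sticks to simple
 * double buffering.
 */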

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
        printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
        xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
        do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG DBG
#else
#define VDBG(dev, fmt, args...) \
        do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
        xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
        xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int ueth_change_mtu(struct net_device *net, int new_mtu)
{
        struct eth_dev *dev = netdev_priv(net);
        unsigned long flags;
        int status = 0;

        /* don't change MTU on "live" link (peer won't know) */
        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb)
                status = -EBUSY;
        else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
                status = -ERANGE;
        else
                net->mtu = new_mtu;
        spin_unlock_irqrestore(&dev->lock, flags);

        return status;
}

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
        struct eth_dev *dev = netdev_priv(net);

        strlcpy(p->driver, "g_ether", sizeof(p->driver));
        strlcpy(p->version, UETH__VERSION, sizeof(p->version));
        strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
        strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
        .get_drvinfo = eth_get_drvinfo,
        .get_link = ethtool_op_get_link,
};

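/* Defer a memory-pressure event to process context: record the flag in
 * dev->todo and schedule eth_work(), which refills the rx queue with
 * GFP_KERNEL allocations once it runs.
 */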
static void defer_kevent(struct eth_dev *dev, int flag)
{
        if (test_and_set_bit(flag, &dev->todo))
                return;
        if (!schedule_work(&dev->work))
                ERROR(dev, "kevent %d may have been dropped\n", flag);
        else
                DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
        struct sk_buff *skb;
        int retval = -ENOMEM;
        size_t size = 0;
        struct usb_ep *out;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb)
                out = dev->port_usb->out_ep;
        else
                out = NULL;
        spin_unlock_irqrestore(&dev->lock, flags);

        if (!out)
                return -ENOTCONN;


        /* Padding up to RX_EXTRA handles minor disagreements with host.
         * Normally we use the USB "terminate on short read" convention;
         * so allow up to (N*maxpacket), since that memory is normally
         * already allocated.  Some hardware doesn't deal well with short
         * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
         * byte off the end (to force hardware errors on overflow).
         *
         * RNDIS uses internal framing, and explicitly allows senders to
         * pad to end-of-packet.  That's potentially nice for speed, but
         * means receivers can't recover lost synch on their own (because
         * new packets don't only start after a short RX).
         */
        size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
        size += dev->port_usb->header_len;
        size += out->maxpacket - 1;
        size -= size % out->maxpacket;
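        /* Worked example (assuming the default 1500 byte MTU, no extra
         * function header, and a 512 byte bulk maxpacket): 14 + 1500 + 20
         * = 1534 bytes, rounded up to 1536 = 3 * 512, so the request
         * always ends on a packet boundary.
         */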

        if (dev->port_usb->is_fixed)
                size = max_t(size_t, size, dev->port_usb->fixed_out_len);

        skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
        if (skb == NULL) {
                DBG(dev, "no rx skb\n");
                goto enomem;
        }

        /* Some platforms perform better when IP packets are aligned,
         * but on at least one, checksumming fails otherwise.  Note:
         * RNDIS headers involve variable numbers of LE32 values.
         */
        skb_reserve(skb, NET_IP_ALIGN);

        req->buf = skb->data;
        req->length = size;
        req->complete = rx_complete;
        req->context = skb;

        retval = usb_ep_queue(out, req, gfp_flags);
        if (retval == -ENOMEM)
enomem:
                defer_kevent(dev, WORK_RX_MEMORY);
        if (retval) {
                DBG(dev, "rx submit --> %d\n", retval);
                if (skb)
                        dev_kfree_skb_any(skb);
        }
        return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct sk_buff *skb = req->context;
        struct eth_dev *dev = ep->driver_data;
        int status = req->status;
        bool rx_queue = false;

        switch (status) {

        /* normal completion */
        case 0:
                skb_put(skb, req->actual);

                if (dev->unwrap) {
                        unsigned long flags;

                        spin_lock_irqsave(&dev->lock, flags);
                        if (dev->port_usb) {
                                status = dev->unwrap(dev->port_usb,
                                                        skb,
                                                        &dev->rx_frames);
                        } else {
                                dev_kfree_skb_any(skb);
                                status = -ENOTCONN;
                        }
                        spin_unlock_irqrestore(&dev->lock, flags);
                } else {
                        skb_queue_tail(&dev->rx_frames, skb);
                }
                if (!status)
                        rx_queue = true;
                break;

        /* software-driven interface shutdown */
        case -ECONNRESET:               /* unlink */
        case -ESHUTDOWN:                /* disconnect etc */
                VDBG(dev, "rx shutdown, code %d\n", status);
                goto quiesce;

        /* for hardware automagic (such as pxa) */
        case -ECONNABORTED:             /* endpoint reset */
                DBG(dev, "rx %s reset\n", ep->name);
                defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
                dev_kfree_skb_any(skb);
                goto clean;

        /* data overrun */
        case -EOVERFLOW:
                dev->net->stats.rx_over_errors++;
                /* FALLTHROUGH */

        default:
                rx_queue = true;
                dev_kfree_skb_any(skb);
                dev->net->stats.rx_errors++;
                DBG(dev, "rx status %d\n", status);
                break;
        }

clean:
        spin_lock(&dev->req_lock);
        list_add(&req->list, &dev->rx_reqs);
        spin_unlock(&dev->req_lock);

        if (rx_queue && likely(napi_schedule_prep(&dev->rx_napi)))
                __napi_schedule(&dev->rx_napi);
}

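/* Make sure "list" holds exactly n usb_requests for endpoint "ep":
 * allocate any that are missing, free any extras.  Returns 0 on success,
 * or -ENOMEM when the list would otherwise end up empty.
 */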
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
        unsigned i;
        struct usb_request *req;

        if (!n)
                return -ENOMEM;

        /* queue/recycle up to N requests */
        i = n;
        list_for_each_entry(req, list, list) {
                if (i-- == 0)
                        goto extra;
        }
        while (i--) {
                req = usb_ep_alloc_request(ep, GFP_ATOMIC);
                if (!req)
                        return list_empty(list) ? -ENOMEM : 0;
                list_add(&req->list, list);
        }
        return 0;

extra:
        /* free extras */
        for (;;) {
                struct list_head *next;

                next = req->list.next;
                list_del(&req->list);
                usb_ep_free_request(ep, req);

                if (next == list)
                        break;

                req = container_of(next, struct usb_request, list);
        }
        return 0;
}

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
        int status;

        spin_lock(&dev->req_lock);
        status = prealloc(&dev->tx_reqs, link->in_ep, n);
        if (status < 0)
                goto fail;
        status = prealloc(&dev->rx_reqs, link->out_ep, n);
        if (status < 0)
                goto fail;
        goto done;
fail:
        DBG(dev, "can't alloc requests\n");
done:
        spin_unlock(&dev->req_lock);
        return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
        struct usb_request *req;
        unsigned long flags;
        int rx_counts = 0;

        /* fill unused rxq slots with some skb */
        spin_lock_irqsave(&dev->req_lock, flags);
        while (!list_empty(&dev->rx_reqs)) {

                if (++rx_counts > qlen(dev->gadget, dev->qmult))
                        break;

                req = container_of(dev->rx_reqs.next,
                                struct usb_request, list);
                list_del_init(&req->list);
                spin_unlock_irqrestore(&dev->req_lock, flags);

                if (rx_submit(dev, req, gfp_flags) < 0) {
                        spin_lock_irqsave(&dev->req_lock, flags);
                        list_add(&req->list, &dev->rx_reqs);
                        spin_unlock_irqrestore(&dev->req_lock, flags);
                        defer_kevent(dev, WORK_RX_MEMORY);
                        return;
                }

                spin_lock_irqsave(&dev->req_lock, flags);
        }
        spin_unlock_irqrestore(&dev->req_lock, flags);
}

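/* NAPI poll callback.  rx_complete() parks received frames on
 * dev->rx_frames and schedules this routine, which hands them to the
 * network stack and then tops the rx queue back up.
 */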
static int gether_poll(struct napi_struct *napi, int budget)
{
        struct eth_dev *dev = container_of(napi, struct eth_dev, rx_napi);
        struct sk_buff *skb;
        unsigned int work_done = 0;
        int status = 0;

        while ((skb = skb_dequeue(&dev->rx_frames))) {
                if (status < 0
                                || ETH_HLEN > skb->len
                                || skb->len > VLAN_ETH_FRAME_LEN) {
                        dev->net->stats.rx_errors++;
                        dev->net->stats.rx_length_errors++;
                        DBG(dev, "rx length %d\n", skb->len);
                        dev_kfree_skb_any(skb);
                        continue;
                }
                skb->protocol = eth_type_trans(skb, dev->net);
                dev->net->stats.rx_packets++;
                dev->net->stats.rx_bytes += skb->len;

                status = netif_receive_skb(skb);
        }

        /* NAPI polling runs in softirq context, so the refill must not
         * use a sleeping allocation here.
         */
        if (netif_running(dev->net)) {
                rx_fill(dev, GFP_ATOMIC);
                work_done++;
        }

        if (work_done < budget)
                napi_complete(&dev->rx_napi);

        return work_done;
}

static void eth_work(struct work_struct *work)
{
        struct eth_dev *dev = container_of(work, struct eth_dev, work);

        if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
                if (netif_running(dev->net))
                        rx_fill(dev, GFP_KERNEL);
        }

        if (dev->todo)
                DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct sk_buff *skb = req->context;
        struct eth_dev *dev = ep->driver_data;

        switch (req->status) {
        default:
                dev->net->stats.tx_errors++;
                VDBG(dev, "tx err %d\n", req->status);
                /* FALLTHROUGH */
        case -ECONNRESET:               /* unlink */
        case -ESHUTDOWN:                /* disconnect etc */
                break;
        case 0:
                dev->net->stats.tx_bytes += skb->len;
        }
        dev->net->stats.tx_packets++;

        spin_lock(&dev->req_lock);
        list_add(&req->list, &dev->tx_reqs);
        spin_unlock(&dev->req_lock);
        dev_kfree_skb_any(skb);

        atomic_dec(&dev->tx_qlen);
        if (netif_carrier_ok(dev->net))
                netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
        return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

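/* ndo_start_xmit: apply the host-requested CDC packet filter, let the
 * function driver wrap the frame if it uses extra framing (RNDIS, EEM,
 * NCM, ...), then queue the buffer on the IN endpoint.
 */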
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                                        struct net_device *net)
{
        struct eth_dev *dev = netdev_priv(net);
        int length = skb->len;
        int retval;
        struct usb_request *req = NULL;
        unsigned long flags;
        struct usb_ep *in;
        u16 cdc_filter;

        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb) {
                in = dev->port_usb->in_ep;
                cdc_filter = dev->port_usb->cdc_filter;
        } else {
                in = NULL;
                cdc_filter = 0;
        }
        spin_unlock_irqrestore(&dev->lock, flags);

        if (!in) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* apply outgoing CDC or RNDIS filters */
        if (!is_promisc(cdc_filter)) {
                u8 *dest = skb->data;

                if (is_multicast_ether_addr(dest)) {
                        u16 type;

                        /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
                         * SET_ETHERNET_MULTICAST_FILTERS requests
                         */
                        if (is_broadcast_ether_addr(dest))
                                type = USB_CDC_PACKET_TYPE_BROADCAST;
                        else
                                type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
                        if (!(cdc_filter & type)) {
                                dev_kfree_skb_any(skb);
                                return NETDEV_TX_OK;
                        }
                }
                /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
        }

        spin_lock_irqsave(&dev->req_lock, flags);
        /*
         * this freelist can be empty if an interrupt triggered disconnect()
         * and reconfigured the gadget (shutting down this queue) after the
         * network stack decided to xmit but before we got the spinlock.
         */
        if (list_empty(&dev->tx_reqs)) {
                spin_unlock_irqrestore(&dev->req_lock, flags);
                return NETDEV_TX_BUSY;
        }

        req = container_of(dev->tx_reqs.next, struct usb_request, list);
        list_del(&req->list);

        /* temporarily stop TX queue when the freelist empties */
        if (list_empty(&dev->tx_reqs))
                netif_stop_queue(net);
        spin_unlock_irqrestore(&dev->req_lock, flags);

        /* no buffer copies needed, unless the network stack did it
         * or the hardware can't use skb buffers, or there's not enough
         * space for the extra headers we need.
         */
        if (dev->wrap) {
                unsigned long flags;

                spin_lock_irqsave(&dev->lock, flags);
                if (dev->port_usb)
                        skb = dev->wrap(dev->port_usb, skb);
                spin_unlock_irqrestore(&dev->lock, flags);
                if (!skb)
                        goto drop;

                length = skb->len;
        }
        req->buf = skb->data;
        req->context = skb;
        req->complete = tx_complete;

        /* NCM requires no zlp if transfer is dwNtbInMaxSize */
        if (dev->port_usb->is_fixed &&
            length == dev->port_usb->fixed_in_len &&
            (length % in->maxpacket) == 0)
                req->zero = 0;
        else
                req->zero = 1;

        /* use zlp framing on tx for strict CDC-Ether conformance,
         * though any robust network rx path ignores extra padding;
         * some hardware also doesn't like to write zlps.
         */
        if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
                length++;
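        /* When the UDC can't write a zlp (!dev->zlp), padding the request
         * by one byte has the same effect: e.g. a 1024 byte transfer on a
         * 512 byte bulk-in endpoint goes out as 512 + 512 + 1, and the
         * final short packet is what tells the host the transfer is over.
         */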

        req->length = length;

        /* throttle high/super speed IRQ rate back slightly */
        if (gadget_is_dualspeed(dev->gadget))
                req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
                                     dev->gadget->speed == USB_SPEED_SUPER)
                        ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
                        : 0;
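        /* i.e. at high/super speed only every qmult-th completion (every
         * 5th with the usual default) raises an interrupt; tx_complete()
         * still recycles the other requests, just without an IRQ for each.
         */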

        retval = usb_ep_queue(in, req, GFP_ATOMIC);
        switch (retval) {
        default:
                DBG(dev, "tx queue err %d\n", retval);
                break;
        case 0:
                net->trans_start = jiffies;
                atomic_inc(&dev->tx_qlen);
        }

        if (retval) {
                dev_kfree_skb_any(skb);
drop:
                dev->net->stats.tx_dropped++;
                spin_lock_irqsave(&dev->req_lock, flags);
                if (list_empty(&dev->tx_reqs))
                        netif_start_queue(net);
                list_add(&req->list, &dev->tx_reqs);
                spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
        DBG(dev, "%s\n", __func__);

        /* fill the rx queue */
        rx_fill(dev, gfp_flags);

        /* and open the tx floodgates */
        atomic_set(&dev->tx_qlen, 0);
        netif_wake_queue(dev->net);
        napi_enable(&dev->rx_napi);
}

static int eth_open(struct net_device *net)
{
        struct eth_dev *dev = netdev_priv(net);
        struct gether *link;

        DBG(dev, "%s\n", __func__);
        if (netif_carrier_ok(dev->net))
                eth_start(dev, GFP_KERNEL);

        spin_lock_irq(&dev->lock);
        link = dev->port_usb;
        if (link && link->open)
                link->open(link);
        spin_unlock_irq(&dev->lock);

        return 0;
}

static int eth_stop(struct net_device *net)
{
        struct eth_dev *dev = netdev_priv(net);
        unsigned long flags;

        VDBG(dev, "%s\n", __func__);
        napi_disable(&dev->rx_napi);
        netif_stop_queue(net);

        DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
                dev->net->stats.rx_packets, dev->net->stats.tx_packets,
                dev->net->stats.rx_errors, dev->net->stats.tx_errors
                );

        /* ensure there are no more active requests */
        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb) {
                struct gether *link = dev->port_usb;
                const struct usb_endpoint_descriptor *in;
                const struct usb_endpoint_descriptor *out;

                if (link->close)
                        link->close(link);

                /* NOTE:  we have no abort-queue primitive we could use
                 * to cancel all pending I/O.  Instead, we disable then
                 * reenable the endpoints ... this idiom may leave toggle
                 * wrong, but that's a self-correcting error.
                 *
                 * REVISIT:  we *COULD* just let the transfers complete at
                 * their own pace; the network stack can handle old packets.
                 * For the moment we leave this here, since it works.
                 */
                in = link->in_ep->desc;
                out = link->out_ep->desc;
                usb_ep_disable(link->in_ep);
                usb_ep_disable(link->out_ep);
                if (netif_carrier_ok(net)) {
                        DBG(dev, "host still using in/out endpoints\n");
                        link->in_ep->desc = in;
                        link->out_ep->desc = out;
                        usb_ep_enable(link->in_ep);
                        usb_ep_enable(link->out_ep);
                }
        }
        spin_unlock_irqrestore(&dev->lock, flags);

        return 0;
}

/*-------------------------------------------------------------------------*/

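/* Parse a MAC address given as twelve hex digits, optionally separated
 * by ':' or '.' (e.g. "01:23:45:67:89:ab").  Falls back to a random
 * locally administered address, and returns nonzero, if the string is
 * missing or doesn't form a valid unicast address.
 */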
static int get_ether_addr(const char *str, u8 *dev_addr)
{
        if (str) {
                unsigned i;

                for (i = 0; i < 6; i++) {
                        unsigned char num;

                        if ((*str == '.') || (*str == ':'))
                                str++;
                        num = hex_to_bin(*str++) << 4;
                        num |= hex_to_bin(*str++);
                        dev_addr[i] = num;
                }
                if (is_valid_ether_addr(dev_addr))
                        return 0;
        }
        eth_random_addr(dev_addr);
        return 1;
}

static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
        if (len < 18)
                return -EINVAL;

        snprintf(str, len, "%02x:%02x:%02x:%02x:%02x:%02x",
                 dev_addr[0], dev_addr[1], dev_addr[2],
                 dev_addr[3], dev_addr[4], dev_addr[5]);
        return 18;
}

static const struct net_device_ops eth_netdev_ops = {
        .ndo_open = eth_open,
        .ndo_stop = eth_stop,
        .ndo_start_xmit = eth_start_xmit,
        .ndo_change_mtu = ueth_change_mtu,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
};

static struct device_type gadget_type = {
        .name = "gadget",
};

/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @dev_addr: NULL, or a string giving the MAC address to use for the
 *	device (self) side of the link
 * @host_addr: NULL, or a string giving the MAC address to use for the
 *	host side of the link
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @qmult: queue length multiplier for high/super speed links
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g,
                const char *dev_addr, const char *host_addr,
                u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
{
        struct eth_dev *dev;
        struct net_device *net;
        int status;

        net = alloc_etherdev(sizeof *dev);
        if (!net)
                return ERR_PTR(-ENOMEM);

        dev = netdev_priv(net);
        netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
        INIT_LIST_HEAD(&dev->tx_reqs);
        INIT_LIST_HEAD(&dev->rx_reqs);

        skb_queue_head_init(&dev->rx_frames);

        /* network device setup */
        dev->net = net;
        dev->qmult = qmult;
        snprintf(net->name, sizeof(net->name), "%s%%d", netname);

        if (get_ether_addr(dev_addr, net->dev_addr))
                dev_warn(&g->dev,
                        "using random %s ethernet address\n", "self");
        if (get_ether_addr(host_addr, dev->host_mac))
                dev_warn(&g->dev,
                        "using random %s ethernet address\n", "host");

        if (ethaddr)
                memcpy(ethaddr, dev->host_mac, ETH_ALEN);

        net->netdev_ops = &eth_netdev_ops;

        SET_ETHTOOL_OPS(net, &ops);

        dev->gadget = g;
        SET_NETDEV_DEV(net, &g->dev);
        SET_NETDEV_DEVTYPE(net, &gadget_type);

        status = register_netdev(net);
        if (status < 0) {
                dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
                free_netdev(net);
                dev = ERR_PTR(status);
        } else {
                INFO(dev, "MAC %pM\n", net->dev_addr);
                INFO(dev, "HOST MAC %pM\n", dev->host_mac);

                /*
                 * two kinds of host-initiated state changes:
                 *  - iff DATA transfer is active, carrier is "on"
                 *  - tx queueing enabled if open *and* carrier is "on"
                 */
                netif_carrier_off(net);
        }

        return dev;
}
EXPORT_SYMBOL(gether_setup_name);

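/* Like gether_setup_name(), but with random addresses, the default queue
 * multiplier, and no gadget bound yet; registration is deferred until
 * gether_register_netdev().  This is the path typically used by
 * configfs-based function drivers, which call gether_set_gadget() and
 * the address setters below before registering.
 */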
struct net_device *gether_setup_name_default(const char *netname)
{
        struct net_device *net;
        struct eth_dev *dev;

        net = alloc_etherdev(sizeof(*dev));
        if (!net)
                return ERR_PTR(-ENOMEM);

        dev = netdev_priv(net);
        netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
        INIT_LIST_HEAD(&dev->tx_reqs);
        INIT_LIST_HEAD(&dev->rx_reqs);

        skb_queue_head_init(&dev->rx_frames);

        /* network device setup */
        dev->net = net;
        dev->qmult = QMULT_DEFAULT;
        snprintf(net->name, sizeof(net->name), "%s%%d", netname);

        eth_random_addr(dev->dev_mac);
        pr_warn("using random %s ethernet address\n", "self");
        eth_random_addr(dev->host_mac);
        pr_warn("using random %s ethernet address\n", "host");

        net->netdev_ops = &eth_netdev_ops;

        SET_ETHTOOL_OPS(net, &ops);
        SET_NETDEV_DEVTYPE(net, &gadget_type);

        return net;
}
EXPORT_SYMBOL(gether_setup_name_default);

int gether_register_netdev(struct net_device *net)
{
        struct eth_dev *dev;
        struct usb_gadget *g;
        struct sockaddr sa;
        int status;

        if (!net->dev.parent)
                return -EINVAL;
        dev = netdev_priv(net);
        g = dev->gadget;
        status = register_netdev(net);
        if (status < 0) {
                dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
                return status;
        } else {
                INFO(dev, "HOST MAC %pM\n", dev->host_mac);

                /* two kinds of host-initiated state changes:
                 *  - iff DATA transfer is active, carrier is "on"
                 *  - tx queueing enabled if open *and* carrier is "on"
                 */
                netif_carrier_off(net);
        }
        sa.sa_family = net->type;
        memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
        rtnl_lock();
        status = dev_set_mac_address(net, &sa);
        rtnl_unlock();
        if (status)
                pr_warn("cannot set self ethernet address: %d\n", status);
        else
                INFO(dev, "MAC %pM\n", dev->dev_mac);

        return status;
}
EXPORT_SYMBOL(gether_register_netdev);

void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        dev->gadget = g;
        SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL(gether_set_gadget);

int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
        struct eth_dev *dev;
        u8 new_addr[ETH_ALEN];

        dev = netdev_priv(net);
        if (get_ether_addr(dev_addr, new_addr))
                return -EINVAL;
        memcpy(dev->dev_mac, new_addr, ETH_ALEN);
        return 0;
}
EXPORT_SYMBOL(gether_set_dev_addr);

int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        return get_ether_addr_str(dev->dev_mac, dev_addr, len);
}
EXPORT_SYMBOL(gether_get_dev_addr);

int gether_set_host_addr(struct net_device *net, const char *host_addr)
{
        struct eth_dev *dev;
        u8 new_addr[ETH_ALEN];

        dev = netdev_priv(net);
        if (get_ether_addr(host_addr, new_addr))
                return -EINVAL;
        memcpy(dev->host_mac, new_addr, ETH_ALEN);
        return 0;
}
EXPORT_SYMBOL(gether_set_host_addr);

int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        return get_ether_addr_str(dev->host_mac, host_addr, len);
}
EXPORT_SYMBOL(gether_get_host_addr);

int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
{
        struct eth_dev *dev;

        if (len < 13)
                return -EINVAL;

        dev = netdev_priv(net);
        snprintf(host_addr, len, "%pm", dev->host_mac);

        return strlen(host_addr);
}
EXPORT_SYMBOL(gether_get_host_addr_cdc);

void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        memcpy(host_mac, dev->host_mac, ETH_ALEN);
}
EXPORT_SYMBOL(gether_get_host_addr_u8);

void gether_set_qmult(struct net_device *net, unsigned qmult)
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        dev->qmult = qmult;
}
EXPORT_SYMBOL(gether_set_qmult);

unsigned gether_get_qmult(struct net_device *net)
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        return dev->qmult;
}
EXPORT_SYMBOL(gether_get_qmult);

int gether_get_ifname(struct net_device *net, char *name, int len)
{
        rtnl_lock();
        strlcpy(name, netdev_name(net), len);
        rtnl_unlock();
        return strlen(name);
}
EXPORT_SYMBOL(gether_get_ifname);

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * @dev: the device being removed
 * Context: may sleep
 *
 * This is called to free all resources allocated by gether_setup().
 */
void gether_cleanup(struct eth_dev *dev)
{
        if (!dev)
                return;

        unregister_netdev(dev->net);
        flush_work(&dev->work);
        free_netdev(dev->net);
}
EXPORT_SYMBOL(gether_cleanup);

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify the returned net_device pointer using IS_ERR().  Unless it
 * holds a negative errno, the endpoints' driver_data values have been
 * overwritten and the link is active.
 */
struct net_device *gether_connect(struct gether *link)
{
        struct eth_dev *dev = link->ioport;
        int result = 0;

        if (!dev)
                return ERR_PTR(-EINVAL);

        link->in_ep->driver_data = dev;
        result = usb_ep_enable(link->in_ep);
        if (result != 0) {
                DBG(dev, "enable %s --> %d\n",
                        link->in_ep->name, result);
                goto fail0;
        }

        link->out_ep->driver_data = dev;
        result = usb_ep_enable(link->out_ep);
        if (result != 0) {
                DBG(dev, "enable %s --> %d\n",
                        link->out_ep->name, result);
                goto fail1;
        }

        if (result == 0)
                result = alloc_requests(dev, link, qlen(dev->gadget,
                                        dev->qmult));

        if (result == 0) {
                dev->zlp = link->is_zlp_ok;
                DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));

                dev->header_len = link->header_len;
                dev->unwrap = link->unwrap;
                dev->wrap = link->wrap;

                spin_lock(&dev->lock);
                dev->port_usb = link;
                if (netif_running(dev->net)) {
                        if (link->open)
                                link->open(link);
                } else {
                        if (link->close)
                                link->close(link);
                }
                spin_unlock(&dev->lock);

                netif_carrier_on(dev->net);
                if (netif_running(dev->net))
                        eth_start(dev, GFP_ATOMIC);

        /* on error, disable any endpoints  */
        } else {
                (void) usb_ep_disable(link->out_ep);
fail1:
                (void) usb_ep_disable(link->in_ep);
        }
fail0:
        /* caller is responsible for cleanup on error */
        if (result < 0)
                return ERR_PTR(result);
        return dev->net;
}
EXPORT_SYMBOL(gether_connect);

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
        struct eth_dev *dev = link->ioport;
        struct usb_request *req;
        struct sk_buff *skb;

        WARN_ON(!dev);
        if (!dev)
                return;

        DBG(dev, "%s\n", __func__);

        netif_stop_queue(dev->net);
        netif_carrier_off(dev->net);

        /* disable endpoints, forcing (synchronous) completion
         * of all pending i/o.  then free the request objects
         * and forget about the endpoints.
         */
        usb_ep_disable(link->in_ep);
        spin_lock(&dev->req_lock);
        while (!list_empty(&dev->tx_reqs)) {
                req = container_of(dev->tx_reqs.next,
                                        struct usb_request, list);
                list_del(&req->list);

                spin_unlock(&dev->req_lock);
                usb_ep_free_request(link->in_ep, req);
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);

        spin_lock(&dev->rx_frames.lock);
        while ((skb = __skb_dequeue(&dev->rx_frames)))
                dev_kfree_skb_any(skb);
        spin_unlock(&dev->rx_frames.lock);

        link->in_ep->driver_data = NULL;
        link->in_ep->desc = NULL;

        usb_ep_disable(link->out_ep);
        spin_lock(&dev->req_lock);
        while (!list_empty(&dev->rx_reqs)) {
                req = container_of(dev->rx_reqs.next,
                                        struct usb_request, list);
                list_del(&req->list);

                spin_unlock(&dev->req_lock);
                usb_ep_free_request(link->out_ep, req);
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);
        link->out_ep->driver_data = NULL;
        link->out_ep->desc = NULL;

        /* finish forgetting about this USB link episode */
        dev->header_len = 0;
        dev->unwrap = NULL;
        dev->wrap = NULL;

        spin_lock(&dev->lock);
        dev->port_usb = NULL;
        spin_unlock(&dev->lock);
}
EXPORT_SYMBOL(gether_disconnect);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");