/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/usb/cdc.h>
#include <linux/wait.h>
#include <linux/if_ether.h>
#include <linux/pm_runtime.h>

/* Project headers: device structs, HCI command codes and endian helpers.
 * NOTE(review): these include lines were dropped by the extraction; the
 * symbols they declare (LTE_GET_INFORMATION, NIC_TYPE_ARP, struct lte_udev,
 * ...) are used throughout this file.
 */
#include "gdm_usb.h"
#include "gdm_lte.h"
#include "hci.h"
#include "hci_packet.h"
#include "gdm_endian.h"
32 #define USB_DEVICE_CDC_DATA(vid, pid) \
33 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
34 USB_DEVICE_ID_MATCH_INT_CLASS | \
35 USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
38 .bInterfaceClass = USB_CLASS_COMM,\
39 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET
41 #define USB_DEVICE_MASS_DATA(vid, pid) \
42 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
43 USB_DEVICE_ID_MATCH_INT_INFO,\
46 .bInterfaceSubClass = USB_SC_SCSI, \
47 .bInterfaceClass = USB_CLASS_MASS_STORAGE,\
48 .bInterfaceProtocol = USB_PR_BULK
50 static const struct usb_device_id id_table
[] = {
51 { USB_DEVICE_CDC_DATA(VID_GCT
, PID_GDM7240
) }, /* GCT GDM7240 */
52 { USB_DEVICE_CDC_DATA(VID_GCT
, PID_GDM7243
) }, /* GCT GDM7243 */
56 MODULE_DEVICE_TABLE(usb
, id_table
);
/*
 * Forward declarations: do_tx()/do_rx() are the delayed-work handlers
 * installed in init_usb(); gdm_usb_recv() is referenced before its
 * definition (from do_rx()'s callers and gdm_usb_resume()).
 */
static void do_tx(struct work_struct *work);
static void do_rx(struct work_struct *work);

static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data,
				  void *data, int len, int context),
			void *cb_data,
			int context);
67 static int request_mac_address(struct lte_udev
*udev
)
70 struct hci_packet
*hci
= (struct hci_packet
*)buf
;
71 struct usb_device
*usbdev
= udev
->usbdev
;
75 hci
->cmd_evt
= gdm_cpu_to_dev16(&udev
->gdm_ed
, LTE_GET_INFORMATION
);
76 hci
->len
= gdm_cpu_to_dev16(&udev
->gdm_ed
, 1);
77 hci
->data
[0] = MAC_ADDRESS
;
79 ret
= usb_bulk_msg(usbdev
, usb_sndbulkpipe(usbdev
, 2), buf
, 5,
82 udev
->request_mac_addr
= 1;
87 static struct usb_tx
*alloc_tx_struct(int len
)
89 struct usb_tx
*t
= NULL
;
92 t
= kzalloc(sizeof(*t
), GFP_ATOMIC
);
98 t
->urb
= usb_alloc_urb(0, GFP_ATOMIC
);
102 t
->buf
= kmalloc(len
, GFP_ATOMIC
);
103 if (!t
->urb
|| !t
->buf
) {
111 usb_free_urb(t
->urb
);
121 static struct usb_tx_sdu
*alloc_tx_sdu_struct(void)
123 struct usb_tx_sdu
*t_sdu
;
125 t_sdu
= kzalloc(sizeof(*t_sdu
), GFP_KERNEL
);
129 t_sdu
->buf
= kmalloc(SDU_BUF_SIZE
, GFP_KERNEL
);
138 static void free_tx_struct(struct usb_tx
*t
)
141 usb_free_urb(t
->urb
);
147 static void free_tx_sdu_struct(struct usb_tx_sdu
*t_sdu
)
155 static struct usb_tx_sdu
*get_tx_sdu_struct(struct tx_cxt
*tx
, int *no_spc
)
157 struct usb_tx_sdu
*t_sdu
;
159 if (list_empty(&tx
->free_list
))
162 t_sdu
= list_entry(tx
->free_list
.next
, struct usb_tx_sdu
, list
);
163 list_del(&t_sdu
->list
);
167 *no_spc
= list_empty(&tx
->free_list
) ? 1 : 0;
172 static void put_tx_struct(struct tx_cxt
*tx
, struct usb_tx_sdu
*t_sdu
)
174 list_add_tail(&t_sdu
->list
, &tx
->free_list
);
178 static struct usb_rx
*alloc_rx_struct(void)
180 struct usb_rx
*r
= NULL
;
183 r
= kmalloc(sizeof(*r
), GFP_KERNEL
);
189 r
->urb
= usb_alloc_urb(0, GFP_KERNEL
);
190 r
->buf
= kmalloc(RX_BUF_SIZE
, GFP_KERNEL
);
191 if (!r
->urb
|| !r
->buf
) {
199 usb_free_urb(r
->urb
);
209 static void free_rx_struct(struct usb_rx
*r
)
212 usb_free_urb(r
->urb
);
218 static struct usb_rx
*get_rx_struct(struct rx_cxt
*rx
, int *no_spc
)
223 spin_lock_irqsave(&rx
->rx_lock
, flags
);
225 if (list_empty(&rx
->free_list
)) {
226 spin_unlock_irqrestore(&rx
->rx_lock
, flags
);
230 r
= list_entry(rx
->free_list
.next
, struct usb_rx
, free_list
);
231 list_del(&r
->free_list
);
235 *no_spc
= list_empty(&rx
->free_list
) ? 1 : 0;
237 spin_unlock_irqrestore(&rx
->rx_lock
, flags
);
242 static void put_rx_struct(struct rx_cxt
*rx
, struct usb_rx
*r
)
246 spin_lock_irqsave(&rx
->rx_lock
, flags
);
248 list_add_tail(&r
->free_list
, &rx
->free_list
);
251 spin_unlock_irqrestore(&rx
->rx_lock
, flags
);
254 static void release_usb(struct lte_udev
*udev
)
256 struct rx_cxt
*rx
= &udev
->rx
;
257 struct tx_cxt
*tx
= &udev
->tx
;
258 struct usb_tx
*t
, *t_next
;
259 struct usb_rx
*r
, *r_next
;
260 struct usb_tx_sdu
*t_sdu
, *t_sdu_next
;
263 spin_lock_irqsave(&tx
->lock
, flags
);
264 list_for_each_entry_safe(t_sdu
, t_sdu_next
, &tx
->sdu_list
, list
) {
265 list_del(&t_sdu
->list
);
266 free_tx_sdu_struct(t_sdu
);
269 list_for_each_entry_safe(t
, t_next
, &tx
->hci_list
, list
) {
274 list_for_each_entry_safe(t_sdu
, t_sdu_next
, &tx
->free_list
, list
) {
275 list_del(&t_sdu
->list
);
276 free_tx_sdu_struct(t_sdu
);
278 spin_unlock_irqrestore(&tx
->lock
, flags
);
280 spin_lock_irqsave(&rx
->submit_lock
, flags
);
281 list_for_each_entry_safe(r
, r_next
, &rx
->rx_submit_list
,
283 spin_unlock_irqrestore(&rx
->submit_lock
, flags
);
284 usb_kill_urb(r
->urb
);
285 spin_lock_irqsave(&rx
->submit_lock
, flags
);
287 spin_unlock_irqrestore(&rx
->submit_lock
, flags
);
289 spin_lock_irqsave(&rx
->rx_lock
, flags
);
290 list_for_each_entry_safe(r
, r_next
, &rx
->free_list
, free_list
) {
291 list_del(&r
->free_list
);
294 spin_unlock_irqrestore(&rx
->rx_lock
, flags
);
296 spin_lock_irqsave(&rx
->to_host_lock
, flags
);
297 list_for_each_entry_safe(r
, r_next
, &rx
->to_host_list
, to_host_list
) {
298 if (r
->index
== (void *)udev
) {
299 list_del(&r
->to_host_list
);
303 spin_unlock_irqrestore(&rx
->to_host_lock
, flags
);
306 static int init_usb(struct lte_udev
*udev
)
310 struct tx_cxt
*tx
= &udev
->tx
;
311 struct rx_cxt
*rx
= &udev
->rx
;
312 struct usb_tx_sdu
*t_sdu
= NULL
;
313 struct usb_rx
*r
= NULL
;
315 udev
->send_complete
= 1;
317 udev
->request_mac_addr
= 0;
318 udev
->usb_state
= PM_NORMAL
;
320 INIT_LIST_HEAD(&tx
->sdu_list
);
321 INIT_LIST_HEAD(&tx
->hci_list
);
322 INIT_LIST_HEAD(&tx
->free_list
);
323 INIT_LIST_HEAD(&rx
->rx_submit_list
);
324 INIT_LIST_HEAD(&rx
->free_list
);
325 INIT_LIST_HEAD(&rx
->to_host_list
);
326 spin_lock_init(&tx
->lock
);
327 spin_lock_init(&rx
->rx_lock
);
328 spin_lock_init(&rx
->submit_lock
);
329 spin_lock_init(&rx
->to_host_lock
);
336 for (i
= 0; i
< MAX_NUM_SDU_BUF
; i
++) {
337 t_sdu
= alloc_tx_sdu_struct();
343 list_add(&t_sdu
->list
, &tx
->free_list
);
347 for (i
= 0; i
< MAX_RX_SUBMIT_COUNT
* 2; i
++) {
348 r
= alloc_rx_struct();
354 list_add(&r
->free_list
, &rx
->free_list
);
357 INIT_DELAYED_WORK(&udev
->work_tx
, do_tx
);
358 INIT_DELAYED_WORK(&udev
->work_rx
, do_rx
);
365 static int set_mac_address(u8
*data
, void *arg
)
367 struct phy_dev
*phy_dev
= arg
;
368 struct lte_udev
*udev
= phy_dev
->priv_dev
;
369 struct tlv
*tlv
= (struct tlv
*)data
;
370 u8 mac_address
[ETH_ALEN
] = {0, };
372 if (tlv
->type
== MAC_ADDRESS
&& udev
->request_mac_addr
) {
373 memcpy(mac_address
, tlv
->data
, tlv
->len
);
375 if (register_lte_device(phy_dev
,
376 &udev
->intf
->dev
, mac_address
) < 0)
377 pr_err("register lte device failed\n");
379 udev
->request_mac_addr
= 0;
387 static void do_rx(struct work_struct
*work
)
389 struct lte_udev
*udev
=
390 container_of(work
, struct lte_udev
, work_rx
.work
);
391 struct rx_cxt
*rx
= &udev
->rx
;
393 struct hci_packet
*hci
;
394 struct phy_dev
*phy_dev
;
400 spin_lock_irqsave(&rx
->to_host_lock
, flags
);
401 if (list_empty(&rx
->to_host_list
)) {
402 spin_unlock_irqrestore(&rx
->to_host_lock
, flags
);
405 r
= list_entry(rx
->to_host_list
.next
,
406 struct usb_rx
, to_host_list
);
407 list_del(&r
->to_host_list
);
408 spin_unlock_irqrestore(&rx
->to_host_lock
, flags
);
410 phy_dev
= r
->cb_data
;
411 udev
= phy_dev
->priv_dev
;
412 hci
= (struct hci_packet
*)r
->buf
;
413 cmd_evt
= gdm_dev16_to_cpu(&udev
->gdm_ed
, hci
->cmd_evt
);
416 case LTE_GET_INFORMATION_RESULT
:
417 if (set_mac_address(hci
->data
, r
->cb_data
) == 0) {
418 r
->callback(r
->cb_data
,
420 r
->urb
->actual_length
,
427 ret
= r
->callback(r
->cb_data
,
429 r
->urb
->actual_length
,
433 pr_err("failed to send received data\n");
438 put_rx_struct(rx
, r
);
447 static void remove_rx_submit_list(struct usb_rx
*r
, struct rx_cxt
*rx
)
450 struct usb_rx
*r_remove
, *r_remove_next
;
452 spin_lock_irqsave(&rx
->submit_lock
, flags
);
453 list_for_each_entry_safe(r_remove
, r_remove_next
,
454 &rx
->rx_submit_list
, rx_submit_list
) {
456 list_del(&r
->rx_submit_list
);
460 spin_unlock_irqrestore(&rx
->submit_lock
, flags
);
463 static void gdm_usb_rcv_complete(struct urb
*urb
)
465 struct usb_rx
*r
= urb
->context
;
466 struct rx_cxt
*rx
= r
->rx
;
468 struct lte_udev
*udev
= container_of(r
->rx
, struct lte_udev
, rx
);
469 struct usb_device
*usbdev
= udev
->usbdev
;
471 remove_rx_submit_list(r
, rx
);
473 if (!urb
->status
&& r
->callback
) {
474 spin_lock_irqsave(&rx
->to_host_lock
, flags
);
475 list_add_tail(&r
->to_host_list
, &rx
->to_host_list
);
476 schedule_work(&udev
->work_rx
.work
);
477 spin_unlock_irqrestore(&rx
->to_host_lock
, flags
);
479 if (urb
->status
&& udev
->usb_state
== PM_NORMAL
)
480 dev_err(&urb
->dev
->dev
, "%s: urb status error %d\n",
481 __func__
, urb
->status
);
483 put_rx_struct(rx
, r
);
486 usb_mark_last_busy(usbdev
);
489 static int gdm_usb_recv(void *priv_dev
,
490 int (*cb
)(void *cb_data
,
491 void *data
, int len
, int context
),
495 struct lte_udev
*udev
= priv_dev
;
496 struct usb_device
*usbdev
= udev
->usbdev
;
497 struct rx_cxt
*rx
= &udev
->rx
;
504 pr_err("invalid device\n");
508 r
= get_rx_struct(rx
, &no_spc
);
510 pr_err("Out of Memory\n");
516 r
->cb_data
= cb_data
;
517 r
->index
= (void *)udev
;
520 usb_fill_bulk_urb(r
->urb
,
522 usb_rcvbulkpipe(usbdev
, 0x83),
525 gdm_usb_rcv_complete
,
528 spin_lock_irqsave(&rx
->submit_lock
, flags
);
529 list_add_tail(&r
->rx_submit_list
, &rx
->rx_submit_list
);
530 spin_unlock_irqrestore(&rx
->submit_lock
, flags
);
532 if (context
== KERNEL_THREAD
)
533 ret
= usb_submit_urb(r
->urb
, GFP_KERNEL
);
535 ret
= usb_submit_urb(r
->urb
, GFP_ATOMIC
);
538 spin_lock_irqsave(&rx
->submit_lock
, flags
);
539 list_del(&r
->rx_submit_list
);
540 spin_unlock_irqrestore(&rx
->submit_lock
, flags
);
542 pr_err("usb_submit_urb failed (%p)\n", r
);
543 put_rx_struct(rx
, r
);
549 static void gdm_usb_send_complete(struct urb
*urb
)
551 struct usb_tx
*t
= urb
->context
;
552 struct tx_cxt
*tx
= t
->tx
;
553 struct lte_udev
*udev
= container_of(tx
, struct lte_udev
, tx
);
556 if (urb
->status
== -ECONNRESET
) {
557 dev_info(&urb
->dev
->dev
, "CONNRESET\n");
562 t
->callback(t
->cb_data
);
566 spin_lock_irqsave(&tx
->lock
, flags
);
567 udev
->send_complete
= 1;
568 schedule_work(&udev
->work_tx
.work
);
569 spin_unlock_irqrestore(&tx
->lock
, flags
);
572 static int send_tx_packet(struct usb_device
*usbdev
, struct usb_tx
*t
, u32 len
)
579 usb_fill_bulk_urb(t
->urb
,
581 usb_sndbulkpipe(usbdev
, 2),
584 gdm_usb_send_complete
,
587 ret
= usb_submit_urb(t
->urb
, GFP_ATOMIC
);
590 dev_err(&usbdev
->dev
, "usb_submit_urb failed: %d\n",
593 usb_mark_last_busy(usbdev
);
598 static u32
packet_aggregation(struct lte_udev
*udev
, u8
*send_buf
)
600 struct tx_cxt
*tx
= &udev
->tx
;
601 struct usb_tx_sdu
*t_sdu
= NULL
;
602 struct multi_sdu
*multi_sdu
= (struct multi_sdu
*)send_buf
;
607 multi_sdu
->cmd_evt
= gdm_cpu_to_dev16(&udev
->gdm_ed
, LTE_TX_MULTI_SDU
);
609 while (num_packet
< MAX_PACKET_IN_MULTI_SDU
) {
610 spin_lock_irqsave(&tx
->lock
, flags
);
611 if (list_empty(&tx
->sdu_list
)) {
612 spin_unlock_irqrestore(&tx
->lock
, flags
);
616 t_sdu
= list_entry(tx
->sdu_list
.next
, struct usb_tx_sdu
, list
);
617 if (send_len
+ t_sdu
->len
> MAX_SDU_SIZE
) {
618 spin_unlock_irqrestore(&tx
->lock
, flags
);
622 list_del(&t_sdu
->list
);
623 spin_unlock_irqrestore(&tx
->lock
, flags
);
625 memcpy(multi_sdu
->data
+ send_len
, t_sdu
->buf
, t_sdu
->len
);
627 send_len
+= (t_sdu
->len
+ 3) & 0xfffc;
630 if (tx
->avail_count
> 10)
631 t_sdu
->callback(t_sdu
->cb_data
);
633 spin_lock_irqsave(&tx
->lock
, flags
);
634 put_tx_struct(tx
, t_sdu
);
635 spin_unlock_irqrestore(&tx
->lock
, flags
);
638 multi_sdu
->len
= gdm_cpu_to_dev16(&udev
->gdm_ed
, send_len
);
639 multi_sdu
->num_packet
= gdm_cpu_to_dev16(&udev
->gdm_ed
, num_packet
);
641 return send_len
+ offsetof(struct multi_sdu
, data
);
644 static void do_tx(struct work_struct
*work
)
646 struct lte_udev
*udev
=
647 container_of(work
, struct lte_udev
, work_tx
.work
);
648 struct usb_device
*usbdev
= udev
->usbdev
;
649 struct tx_cxt
*tx
= &udev
->tx
;
650 struct usb_tx
*t
= NULL
;
655 if (!usb_autopm_get_interface(udev
->intf
))
656 usb_autopm_put_interface(udev
->intf
);
658 if (udev
->usb_state
== PM_SUSPEND
)
661 spin_lock_irqsave(&tx
->lock
, flags
);
662 if (!udev
->send_complete
) {
663 spin_unlock_irqrestore(&tx
->lock
, flags
);
666 udev
->send_complete
= 0;
668 if (!list_empty(&tx
->hci_list
)) {
669 t
= list_entry(tx
->hci_list
.next
, struct usb_tx
, list
);
674 } else if (!list_empty(&tx
->sdu_list
)) {
676 udev
->send_complete
= 1;
677 spin_unlock_irqrestore(&tx
->lock
, flags
);
681 t
= alloc_tx_struct(TX_BUF_SIZE
);
683 spin_unlock_irqrestore(&tx
->lock
, flags
);
693 udev
->send_complete
= 1;
694 spin_unlock_irqrestore(&tx
->lock
, flags
);
697 spin_unlock_irqrestore(&tx
->lock
, flags
);
700 len
= packet_aggregation(udev
, t
->buf
);
702 if (send_tx_packet(usbdev
, t
, len
)) {
703 pr_err("send_tx_packet failed\n");
705 gdm_usb_send_complete(t
->urb
);
709 #define SDU_PARAM_LEN 12
710 static int gdm_usb_sdu_send(void *priv_dev
, void *data
, int len
,
711 unsigned int dft_eps_ID
, unsigned int eps_ID
,
712 void (*cb
)(void *data
), void *cb_data
,
713 int dev_idx
, int nic_type
)
715 struct lte_udev
*udev
= priv_dev
;
716 struct tx_cxt
*tx
= &udev
->tx
;
717 struct usb_tx_sdu
*t_sdu
;
718 struct sdu
*sdu
= NULL
;
724 pr_err("sdu send - invalid device\n");
728 spin_lock_irqsave(&tx
->lock
, flags
);
729 t_sdu
= get_tx_sdu_struct(tx
, &no_spc
);
730 spin_unlock_irqrestore(&tx
->lock
, flags
);
733 pr_err("sdu send - free list empty\n");
737 sdu
= (struct sdu
*)t_sdu
->buf
;
738 sdu
->cmd_evt
= gdm_cpu_to_dev16(&udev
->gdm_ed
, LTE_TX_SDU
);
739 if (nic_type
== NIC_TYPE_ARP
) {
740 send_len
= len
+ SDU_PARAM_LEN
;
741 memcpy(sdu
->data
, data
, len
);
743 send_len
= len
- ETH_HLEN
;
744 send_len
+= SDU_PARAM_LEN
;
745 memcpy(sdu
->data
, data
+ ETH_HLEN
, len
- ETH_HLEN
);
748 sdu
->len
= gdm_cpu_to_dev16(&udev
->gdm_ed
, send_len
);
749 sdu
->dft_eps_ID
= gdm_cpu_to_dev32(&udev
->gdm_ed
, dft_eps_ID
);
750 sdu
->bearer_ID
= gdm_cpu_to_dev32(&udev
->gdm_ed
, eps_ID
);
751 sdu
->nic_type
= gdm_cpu_to_dev32(&udev
->gdm_ed
, nic_type
);
753 t_sdu
->len
= send_len
+ HCI_HEADER_SIZE
;
754 t_sdu
->callback
= cb
;
755 t_sdu
->cb_data
= cb_data
;
757 spin_lock_irqsave(&tx
->lock
, flags
);
758 list_add_tail(&t_sdu
->list
, &tx
->sdu_list
);
759 schedule_work(&udev
->work_tx
.work
);
760 spin_unlock_irqrestore(&tx
->lock
, flags
);
768 static int gdm_usb_hci_send(void *priv_dev
, void *data
, int len
,
769 void (*cb
)(void *data
), void *cb_data
)
771 struct lte_udev
*udev
= priv_dev
;
772 struct tx_cxt
*tx
= &udev
->tx
;
777 pr_err("hci send - invalid device\n");
781 t
= alloc_tx_struct(len
);
783 pr_err("hci_send - out of memory\n");
787 memcpy(t
->buf
, data
, len
);
789 t
->cb_data
= cb_data
;
794 spin_lock_irqsave(&tx
->lock
, flags
);
795 list_add_tail(&t
->list
, &tx
->hci_list
);
796 schedule_work(&udev
->work_tx
.work
);
797 spin_unlock_irqrestore(&tx
->lock
, flags
);
802 static struct gdm_endian
*gdm_usb_get_endian(void *priv_dev
)
804 struct lte_udev
*udev
= priv_dev
;
806 return &udev
->gdm_ed
;
809 static int gdm_usb_probe(struct usb_interface
*intf
,
810 const struct usb_device_id
*id
)
813 struct phy_dev
*phy_dev
= NULL
;
814 struct lte_udev
*udev
= NULL
;
815 u16 idVendor
, idProduct
;
816 int bInterfaceNumber
;
817 struct usb_device
*usbdev
= interface_to_usbdev(intf
);
819 bInterfaceNumber
= intf
->cur_altsetting
->desc
.bInterfaceNumber
;
820 idVendor
= __le16_to_cpu(usbdev
->descriptor
.idVendor
);
821 idProduct
= __le16_to_cpu(usbdev
->descriptor
.idProduct
);
823 pr_info("net vid = 0x%04x pid = 0x%04x\n", idVendor
, idProduct
);
825 if (bInterfaceNumber
> NETWORK_INTERFACE
) {
826 pr_info("not a network device\n");
830 phy_dev
= kzalloc(sizeof(*phy_dev
), GFP_KERNEL
);
834 udev
= kzalloc(sizeof(*udev
), GFP_KERNEL
);
840 phy_dev
->priv_dev
= (void *)udev
;
841 phy_dev
->send_hci_func
= gdm_usb_hci_send
;
842 phy_dev
->send_sdu_func
= gdm_usb_sdu_send
;
843 phy_dev
->rcv_func
= gdm_usb_recv
;
844 phy_dev
->get_endian
= gdm_usb_get_endian
;
846 udev
->usbdev
= usbdev
;
847 ret
= init_usb(udev
);
849 dev_err(intf
->usb_dev
, "init_usb func failed\n");
854 intf
->needs_remote_wakeup
= 1;
855 usb_enable_autosuspend(usbdev
);
856 pm_runtime_set_autosuspend_delay(&usbdev
->dev
, AUTO_SUSPEND_TIMER
);
858 /* List up hosts with big endians, otherwise,
859 * defaults to little endian
861 if (idProduct
== PID_GDM7243
)
862 gdm_set_endian(&udev
->gdm_ed
, ENDIANNESS_BIG
);
864 gdm_set_endian(&udev
->gdm_ed
, ENDIANNESS_LITTLE
);
866 ret
= request_mac_address(udev
);
868 dev_err(intf
->usb_dev
, "request Mac address failed\n");
869 goto err_mac_address
;
872 start_rx_proc(phy_dev
);
874 usb_set_intfdata(intf
, phy_dev
);
888 static void gdm_usb_disconnect(struct usb_interface
*intf
)
890 struct phy_dev
*phy_dev
;
891 struct lte_udev
*udev
;
892 u16 idVendor
, idProduct
;
893 struct usb_device
*usbdev
;
895 usbdev
= interface_to_usbdev(intf
);
897 idVendor
= __le16_to_cpu(usbdev
->descriptor
.idVendor
);
898 idProduct
= __le16_to_cpu(usbdev
->descriptor
.idProduct
);
900 phy_dev
= usb_get_intfdata(intf
);
902 udev
= phy_dev
->priv_dev
;
903 unregister_lte_device(phy_dev
);
916 static int gdm_usb_suspend(struct usb_interface
*intf
, pm_message_t pm_msg
)
918 struct phy_dev
*phy_dev
;
919 struct lte_udev
*udev
;
922 struct usb_rx
*r_next
;
925 phy_dev
= usb_get_intfdata(intf
);
926 udev
= phy_dev
->priv_dev
;
928 if (udev
->usb_state
!= PM_NORMAL
) {
929 dev_err(intf
->usb_dev
, "usb suspend - invalid state\n");
933 udev
->usb_state
= PM_SUSPEND
;
935 spin_lock_irqsave(&rx
->submit_lock
, flags
);
936 list_for_each_entry_safe(r
, r_next
, &rx
->rx_submit_list
,
938 spin_unlock_irqrestore(&rx
->submit_lock
, flags
);
939 usb_kill_urb(r
->urb
);
940 spin_lock_irqsave(&rx
->submit_lock
, flags
);
942 spin_unlock_irqrestore(&rx
->submit_lock
, flags
);
944 cancel_work_sync(&udev
->work_tx
.work
);
945 cancel_work_sync(&udev
->work_rx
.work
);
950 static int gdm_usb_resume(struct usb_interface
*intf
)
952 struct phy_dev
*phy_dev
;
953 struct lte_udev
*udev
;
960 phy_dev
= usb_get_intfdata(intf
);
961 udev
= phy_dev
->priv_dev
;
964 if (udev
->usb_state
!= PM_SUSPEND
) {
965 dev_err(intf
->usb_dev
, "usb resume - invalid state\n");
968 udev
->usb_state
= PM_NORMAL
;
970 spin_lock_irqsave(&rx
->rx_lock
, flags
);
971 issue_count
= rx
->avail_count
- MAX_RX_SUBMIT_COUNT
;
972 spin_unlock_irqrestore(&rx
->rx_lock
, flags
);
974 if (issue_count
>= 0) {
975 for (i
= 0; i
< issue_count
; i
++)
976 gdm_usb_recv(phy_dev
->priv_dev
,
983 spin_lock_irqsave(&tx
->lock
, flags
);
984 schedule_work(&udev
->work_tx
.work
);
985 spin_unlock_irqrestore(&tx
->lock
, flags
);
990 static struct usb_driver gdm_usb_lte_driver
= {
992 .probe
= gdm_usb_probe
,
993 .disconnect
= gdm_usb_disconnect
,
994 .id_table
= id_table
,
995 .supports_autosuspend
= 1,
996 .suspend
= gdm_usb_suspend
,
997 .resume
= gdm_usb_resume
,
998 .reset_resume
= gdm_usb_resume
,
1001 static int __init
gdm_usb_lte_init(void)
1003 if (gdm_lte_event_init() < 0) {
1004 pr_err("error creating event\n");
1008 return usb_register(&gdm_usb_lte_driver
);
1011 static void __exit
gdm_usb_lte_exit(void)
1013 gdm_lte_event_exit();
1015 usb_deregister(&gdm_usb_lte_driver
);
1018 module_init(gdm_usb_lte_init
);
1019 module_exit(gdm_usb_lte_exit
);
1021 MODULE_VERSION(DRIVER_VERSION
);
1022 MODULE_DESCRIPTION("GCT LTE USB Device Driver");
1023 MODULE_LICENSE("GPL");