/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/usb/cdc.h>

#include "gdm_mux.h"

static struct workqueue_struct *mux_rx_wq;

static u16 packet_type[TTY_MAX_COUNT] = {0xF011, 0xF010};

#define USB_DEVICE_CDC_DATA(vid, pid) \
	.match_flags = \
		USB_DEVICE_ID_MATCH_DEVICE |\
		USB_DEVICE_ID_MATCH_INT_CLASS |\
		USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceClass = USB_CLASS_COMM,\
	.bInterfaceSubClass = USB_CDC_SUBCLASS_ACM

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE_CDC_DATA(0x1076, 0x8000) }, /* GCT GDM7240 */
	{ USB_DEVICE_CDC_DATA(0x1076, 0x8f00) }, /* GCT GDM7243 */
	{ USB_DEVICE_CDC_DATA(0x1076, 0x9000) }, /* GCT GDM7243 */
	{ USB_DEVICE_CDC_DATA(0x1d74, 0x2300) }, /* LGIT Phoenix */
	{}
};

MODULE_DEVICE_TABLE(usb, id_table);

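/* Map a mux packet type (0xF011/0xF010) to its packet_type[] index, or -1. */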
static int packet_type_to_index(u16 packetType)
{
	int i;

	for (i = 0; i < TTY_MAX_COUNT; i++) {
		if (packet_type[i] == packetType)
			return i;
	}

	return -1;
}

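/*
 * TX/RX descriptor helpers: each mux_tx/mux_rx owns a URB and a bounce
 * buffer that are allocated and freed together.  TX uses GFP_ATOMIC
 * because gdm_mux_send() allocates under write_lock; RX can sleep.
 */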
static struct mux_tx *alloc_mux_tx(int len)
{
	struct mux_tx *t = NULL;

	t = kzalloc(sizeof(struct mux_tx), GFP_ATOMIC);
	if (!t)
		return NULL;

	t->urb = usb_alloc_urb(0, GFP_ATOMIC);
	t->buf = kmalloc(MUX_TX_MAX_SIZE, GFP_ATOMIC);
	if (!t->urb || !t->buf) {
		usb_free_urb(t->urb);
		kfree(t->buf);
		kfree(t);
		return NULL;
	}

	return t;
}

static void free_mux_tx(struct mux_tx *t)
{
	if (t) {
		usb_free_urb(t->urb);
		kfree(t->buf);
		kfree(t);
	}
}

static struct mux_rx *alloc_mux_rx(void)
{
	struct mux_rx *r = NULL;

	r = kzalloc(sizeof(struct mux_rx), GFP_KERNEL);
	if (!r)
		return NULL;

	r->urb = usb_alloc_urb(0, GFP_KERNEL);
	r->buf = kmalloc(MUX_RX_MAX_SIZE, GFP_KERNEL);
	if (!r->urb || !r->buf) {
		usb_free_urb(r->urb);
		kfree(r->buf);
		kfree(r);
		return NULL;
	}

	return r;
}

static void free_mux_rx(struct mux_rx *r)
{
	if (r) {
		usb_free_urb(r->urb);
		kfree(r->buf);
		kfree(r);
	}
}

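/* Pop/push preallocated mux_rx descriptors on the RX free list. */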
static struct mux_rx *get_rx_struct(struct rx_cxt *rx)
{
	struct mux_rx *r;
	unsigned long flags;

	spin_lock_irqsave(&rx->free_list_lock, flags);

	if (list_empty(&rx->rx_free_list)) {
		spin_unlock_irqrestore(&rx->free_list_lock, flags);
		return NULL;
	}

	r = list_entry(rx->rx_free_list.prev, struct mux_rx, free_list);
	list_del(&r->free_list);

	spin_unlock_irqrestore(&rx->free_list_lock, flags);

	return r;
}

static void put_rx_struct(struct rx_cxt *rx, struct mux_rx *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rx->free_list_lock, flags);
	list_add_tail(&r->free_list, &rx->rx_free_list);
	spin_unlock_irqrestore(&rx->free_list_lock, flags);
}

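/*
 * Walk one received USB buffer, which may carry several mux packets
 * back to back: validate each header (START_FLAG, length, packet type)
 * and hand the payload to the TTY layer via r->callback.  If the TTY
 * side cannot take more data, remember the offset and stop early.
 */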
static int up_to_host(struct mux_rx *r)
{
	struct mux_dev *mux_dev = (struct mux_dev *)r->mux_dev;
	struct mux_pkt_header *mux_header;
	unsigned int start_flag;
	unsigned int payload_size;
	unsigned short packet_type;
	int total_len;
	u32 packet_size_sum = r->offset;
	int index;
	int ret = TO_HOST_INVALID_PACKET;
	int len = r->len;

	while (1) {
		mux_header = (struct mux_pkt_header *)(r->buf +
						       packet_size_sum);
		start_flag = __le32_to_cpu(mux_header->start_flag);
		payload_size = __le32_to_cpu(mux_header->payload_size);
		packet_type = __le16_to_cpu(mux_header->packet_type);

		if (start_flag != START_FLAG) {
			pr_err("invalid START_FLAG %x\n", start_flag);
			break;
		}

		total_len = ALIGN(MUX_HEADER_SIZE + payload_size, 4);

		if (len - packet_size_sum < total_len) {
			pr_err("invalid payload : %d %d %04x\n",
			       payload_size, len, packet_type);
			break;
		}

		index = packet_type_to_index(packet_type);
		if (index < 0) {
			pr_err("invalid index %d\n", index);
			break;
		}

		ret = r->callback(mux_header->data,
				  payload_size,
				  index,
				  mux_dev->tty_dev,
				  RECV_PACKET_PROCESS_CONTINUE);
		if (ret == TO_HOST_BUFFER_REQUEST_FAIL) {
			r->offset += packet_size_sum;
			break;
		}

		packet_size_sum += total_len;
		if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
			ret = r->callback(NULL,
					  0,
					  index,
					  mux_dev->tty_dev,
					  RECV_PACKET_PROCESS_COMPLETE);
			break;
		}
	}

	return ret;
}

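/*
 * RX workqueue handler: drain to_host_list, push each completed buffer
 * up to the TTY layer and recycle the descriptor unless the TTY side
 * refused the data.
 */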
static void do_rx(struct work_struct *work)
{
	struct mux_dev *mux_dev =
		container_of(work, struct mux_dev, work_rx.work);
	struct mux_rx *r;
	struct rx_cxt *rx = (struct rx_cxt *)&mux_dev->rx;
	unsigned long flags;
	int ret = 0;

	while (1) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		if (list_empty(&rx->to_host_list)) {
			spin_unlock_irqrestore(&rx->to_host_lock, flags);
			break;
		}
		r = list_entry(rx->to_host_list.next, struct mux_rx,
			       to_host_list);
		list_del(&r->to_host_list);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);

		ret = up_to_host(r);
		if (ret == TO_HOST_BUFFER_REQUEST_FAIL)
			pr_err("failed to send mux data to host\n");
		else
			put_rx_struct(rx, r);
	}
}

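/* Drop @r from the RX submit list if it is still queued there. */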
static void remove_rx_submit_list(struct mux_rx *r, struct rx_cxt *rx)
{
	unsigned long flags;
	struct mux_rx *r_remove, *r_remove_next;

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list,
				 rx_submit_list) {
		if (r == r_remove)
			list_del(&r->rx_submit_list);
	}
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);
}

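/*
 * Bulk-in URB completion: on success queue the buffer on to_host_list
 * and kick the RX workqueue; on error return the descriptor to the
 * free list (errors are only logged while in the PM_NORMAL state).
 */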
static void gdm_mux_rcv_complete(struct urb *urb)
{
	struct mux_rx *r = urb->context;
	struct mux_dev *mux_dev = (struct mux_dev *)r->mux_dev;
	struct rx_cxt *rx = &mux_dev->rx;
	unsigned long flags;

	remove_rx_submit_list(r, rx);

	if (urb->status) {
		if (mux_dev->usb_state == PM_NORMAL)
			dev_err(&urb->dev->dev, "%s: urb status error %d\n",
				__func__, urb->status);
		put_rx_struct(rx, r);
	} else {
		r->len = r->urb->actual_length;
		spin_lock_irqsave(&rx->to_host_lock, flags);
		list_add_tail(&r->to_host_list, &rx->to_host_list);
		queue_work(mux_rx_wq, &mux_dev->work_rx.work);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);
	}
}

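/*
 * Take a free RX descriptor, record the receive callback and submit a
 * bulk-in URB on endpoint 0x86.  Exposed to the TTY glue layer as
 * tty_dev->recv_func.
 */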
static int gdm_mux_recv(void *priv_dev, int (*cb)(void *data, int len,
			int tty_index, struct tty_dev *tty_dev, int complete))
{
	struct mux_dev *mux_dev = priv_dev;
	struct usb_device *usbdev = mux_dev->usbdev;
	struct mux_rx *r;
	struct rx_cxt *rx = &mux_dev->rx;
	unsigned long flags;
	int ret;

	if (!usbdev) {
		pr_err("device is disconnected\n");
		return -ENODEV;
	}

	r = get_rx_struct(rx);
	if (!r) {
		pr_err("get_rx_struct fail\n");
		return -ENOMEM;
	}

	r->offset = 0;
	r->mux_dev = (void *)mux_dev;
	r->callback = cb;
	mux_dev->rx_cb = cb;

	usb_fill_bulk_urb(r->urb,
			  usbdev,
			  usb_rcvbulkpipe(usbdev, 0x86),
			  r->buf,
			  MUX_RX_MAX_SIZE,
			  gdm_mux_rcv_complete,
			  r);

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	ret = usb_submit_urb(r->urb, GFP_KERNEL);

	if (ret) {
		spin_lock_irqsave(&rx->submit_list_lock, flags);
		list_del(&r->rx_submit_list);
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);

		put_rx_struct(rx, r);

		pr_err("usb_submit_urb ret=%d\n", ret);
	}

	usb_mark_last_busy(usbdev);

	return ret;
}

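/* Bulk-out completion: run the sender's callback, then free the mux_tx. */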
static void gdm_mux_send_complete(struct urb *urb)
{
	struct mux_tx *t = urb->context;

	if (urb->status == -ECONNRESET) {
		dev_info(&urb->dev->dev, "CONNRESET\n");
		free_mux_tx(t);
		return;
	}

	if (t->callback)
		t->callback(t->cb_data);

	free_mux_tx(t);
}

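/*
 * Build a mux packet (header plus payload, zero padded to a 4-byte
 * boundary) for the given TTY index and submit it on bulk-out
 * endpoint 5.  A suspended device is woken first through an autopm
 * get/put pair.
 */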
static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
			void (*cb)(void *data), void *cb_data)
{
	struct mux_dev *mux_dev = priv_dev;
	struct usb_device *usbdev = mux_dev->usbdev;
	struct mux_pkt_header *mux_header;
	struct mux_tx *t = NULL;
	static u32 seq_num = 1;
	int total_len;
	int ret;
	unsigned long flags;

	if (mux_dev->usb_state == PM_SUSPEND) {
		ret = usb_autopm_get_interface(mux_dev->intf);
		if (!ret)
			usb_autopm_put_interface(mux_dev->intf);
	}

	spin_lock_irqsave(&mux_dev->write_lock, flags);

	total_len = ALIGN(MUX_HEADER_SIZE + len, 4);

	t = alloc_mux_tx(total_len);
	if (!t) {
		pr_err("alloc_mux_tx fail\n");
		spin_unlock_irqrestore(&mux_dev->write_lock, flags);
		return -ENOMEM;
	}

	mux_header = (struct mux_pkt_header *)t->buf;
	mux_header->start_flag = __cpu_to_le32(START_FLAG);
	mux_header->seq_num = __cpu_to_le32(seq_num++);
	mux_header->payload_size = __cpu_to_le32((u32)len);
	mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]);

	memcpy(t->buf + MUX_HEADER_SIZE, data, len);
	memset(t->buf + MUX_HEADER_SIZE + len, 0,
	       total_len - MUX_HEADER_SIZE - len);

	t->len = total_len;
	t->callback = cb;
	t->cb_data = cb_data;

	usb_fill_bulk_urb(t->urb,
			  usbdev,
			  usb_sndbulkpipe(usbdev, 5),
			  t->buf,
			  total_len,
			  gdm_mux_send_complete,
			  t);

	ret = usb_submit_urb(t->urb, GFP_ATOMIC);

	spin_unlock_irqrestore(&mux_dev->write_lock, flags);

	if (ret)
		pr_err("usb_submit_urb Error: %d\n", ret);

	usb_mark_last_busy(usbdev);

	return ret;
}

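/*
 * Issue an ACM class control request (wIndex fixed to 2) with a 5 s
 * timeout.  Returns 0 on success or a negative errno.
 */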
static int gdm_mux_send_control(void *priv_dev, int request, int value,
				void *buf, int len)
{
	struct mux_dev *mux_dev = priv_dev;
	struct usb_device *usbdev = mux_dev->usbdev;
	int ret;

	ret = usb_control_msg(usbdev,
			      usb_sndctrlpipe(usbdev, 0),
			      request,
			      USB_RT_ACM,
			      value,
			      2,
			      buf,
			      len,
			      5000);

	if (ret < 0)
		pr_err("usb_control_msg error: %d\n", ret);

	return ret < 0 ? ret : 0;
}

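/*
 * Cancel pending RX work, kill in-flight RX URBs and free every RX
 * descriptor still sitting on the free or to_host lists.
 */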
static void release_usb(struct mux_dev *mux_dev)
{
	struct rx_cxt *rx = &mux_dev->rx;
	struct mux_rx *r, *r_next;
	unsigned long flags;

	cancel_delayed_work(&mux_dev->work_rx);

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_list_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	spin_lock_irqsave(&rx->free_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_free_list, free_list) {
		list_del(&r->free_list);
		free_mux_rx(r);
	}
	spin_unlock_irqrestore(&rx->free_list_lock, flags);

	spin_lock_irqsave(&rx->to_host_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
		if (r->mux_dev == (void *)mux_dev) {
			list_del(&r->to_host_list);
			free_mux_rx(r);
		}
	}
	spin_unlock_irqrestore(&rx->to_host_lock, flags);
}

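/*
 * Initialise the RX context (lists, locks, delayed work) and
 * preallocate MAX_ISSUE_NUM * 2 receive descriptors.
 */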
static int init_usb(struct mux_dev *mux_dev)
{
	struct mux_rx *r;
	struct rx_cxt *rx = &mux_dev->rx;
	int ret = 0;
	int i;

	spin_lock_init(&mux_dev->write_lock);
	INIT_LIST_HEAD(&rx->to_host_list);
	INIT_LIST_HEAD(&rx->rx_submit_list);
	INIT_LIST_HEAD(&rx->rx_free_list);
	spin_lock_init(&rx->to_host_lock);
	spin_lock_init(&rx->submit_list_lock);
	spin_lock_init(&rx->free_list_lock);

	for (i = 0; i < MAX_ISSUE_NUM * 2; i++) {
		r = alloc_mux_rx();
		if (r == NULL) {
			ret = -ENOMEM;
			break;
		}

		list_add(&r->free_list, &rx->rx_free_list);
	}

	INIT_DELAYED_WORK(&mux_dev->work_rx, do_rx);

	return ret;
}

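/*
 * Bind only to interface number 2 of a matched device, set up the RX
 * context, wire the send/recv/control callbacks into a tty_dev and
 * register it with the LTE TTY layer.
 */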
static int gdm_mux_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct mux_dev *mux_dev;
	struct tty_dev *tty_dev;
	u16 idVendor, idProduct;
	int bInterfaceNumber;
	int ret;
	int i;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;

	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	pr_info("mux vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

	if (bInterfaceNumber != 2)
		return -ENODEV;

	mux_dev = kzalloc(sizeof(struct mux_dev), GFP_KERNEL);
	if (!mux_dev)
		return -ENOMEM;

	tty_dev = kzalloc(sizeof(struct tty_dev), GFP_KERNEL);
	if (!tty_dev) {
		ret = -ENOMEM;
		goto err_free_mux;
	}

	mux_dev->usbdev = usbdev;
	mux_dev->control_intf = intf;

	ret = init_usb(mux_dev);
	if (ret)
		goto err_free_usb;

	tty_dev->priv_dev = (void *)mux_dev;
	tty_dev->send_func = gdm_mux_send;
	tty_dev->recv_func = gdm_mux_recv;
	tty_dev->send_control = gdm_mux_send_control;

	ret = register_lte_tty_device(tty_dev, &intf->dev);
	if (ret)
		goto err_unregister_tty;

	for (i = 0; i < TTY_MAX_COUNT; i++)
		mux_dev->tty_dev = tty_dev;

	mux_dev->intf = intf;
	mux_dev->usb_state = PM_NORMAL;

	usb_get_dev(usbdev);
	usb_set_intfdata(intf, tty_dev);

	return 0;

err_unregister_tty:
	unregister_lte_tty_device(tty_dev);
err_free_usb:
	release_usb(mux_dev);
	kfree(tty_dev);
err_free_mux:
	kfree(mux_dev);

	return ret;
}

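/*
 * Undo probe: release RX resources, unregister the TTY device and drop
 * the usb_device reference taken at probe time.
 */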
static void gdm_mux_disconnect(struct usb_interface *intf)
{
	struct tty_dev *tty_dev;
	struct mux_dev *mux_dev;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	tty_dev = usb_get_intfdata(intf);

	mux_dev = tty_dev->priv_dev;

	release_usb(mux_dev);
	unregister_lte_tty_device(tty_dev);

	kfree(mux_dev);
	kfree(tty_dev);

	usb_put_dev(usbdev);
}

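/*
 * Autosuspend: mark the device PM_SUSPEND and kill all outstanding RX
 * URBs so nothing is in flight while suspended.
 */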
static int gdm_mux_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
	struct tty_dev *tty_dev;
	struct mux_dev *mux_dev;
	struct rx_cxt *rx;
	struct mux_rx *r, *r_next;
	unsigned long flags;

	tty_dev = usb_get_intfdata(intf);
	mux_dev = tty_dev->priv_dev;
	rx = &mux_dev->rx;

	if (mux_dev->usb_state != PM_NORMAL) {
		dev_err(intf->usb_dev, "usb suspend - invalid state\n");
		return -1;
	}

	mux_dev->usb_state = PM_SUSPEND;

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_list_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	return 0;
}

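/*
 * Resume: mark the device PM_NORMAL and resubmit MAX_ISSUE_NUM bulk-in
 * URBs using the callback saved in mux_dev->rx_cb.
 */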
static int gdm_mux_resume(struct usb_interface *intf)
{
	struct tty_dev *tty_dev;
	struct mux_dev *mux_dev;
	u8 i;

	tty_dev = usb_get_intfdata(intf);
	mux_dev = tty_dev->priv_dev;

	if (mux_dev->usb_state != PM_SUSPEND) {
		dev_err(intf->usb_dev, "usb resume - invalid state\n");
		return -1;
	}

	mux_dev->usb_state = PM_NORMAL;

	for (i = 0; i < MAX_ISSUE_NUM; i++)
		gdm_mux_recv(mux_dev, mux_dev->rx_cb);

	return 0;
}

static struct usb_driver gdm_mux_driver = {
	.name = "gdm_mux",
	.probe = gdm_mux_probe,
	.disconnect = gdm_mux_disconnect,
	.id_table = id_table,
	.supports_autosuspend = 1,
	.suspend = gdm_mux_suspend,
	.resume = gdm_mux_resume,
	.reset_resume = gdm_mux_resume,
};

static int __init gdm_usb_mux_init(void)
{
	mux_rx_wq = create_workqueue("mux_rx_wq");
	if (mux_rx_wq == NULL) {
		pr_err("work queue create fail\n");
		return -1;
	}

	register_lte_tty_driver();

	return usb_register(&gdm_mux_driver);
}

static void __exit gdm_usb_mux_exit(void)
{
	unregister_lte_tty_driver();

	if (mux_rx_wq) {
		flush_workqueue(mux_rx_wq);
		destroy_workqueue(mux_rx_wq);
	}

	usb_deregister(&gdm_mux_driver);
}

module_init(gdm_usb_mux_init);
module_exit(gdm_usb_mux_exit);

MODULE_DESCRIPTION("GCT LTE TTY Device Driver");
MODULE_LICENSE("GPL");