/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 #include <linux/module.h>
17 #include <linux/kernel.h>
18 #include <linux/usb.h>
19 #include <linux/errno.h>
20 #include <linux/init.h>
21 #include <linux/tty.h>
22 #include <linux/tty_driver.h>
23 #include <linux/tty_flip.h>
24 #include <linux/slab.h>
25 #include <linux/usb/cdc.h>
/* Workqueue used to defer mux RX processing out of URB-completion context. */
struct workqueue_struct *mux_rx_wq;
/* Mux packet-type id per TTY index; packet_type_to_index() scans this table. */
static u16 packet_type[TTY_MAX_COUNT] = {0xF011, 0xF010};
/*
 * Match a CDC (comm class / ACM subclass) interface on a specific VID/PID.
 * NOTE(review): source lines 35 and 39-40 (presumably the `.match_flags =`
 * assignment and the `.idVendor`/`.idProduct` initializers) are not visible
 * in this chunk -- confirm against the full file.
 */
#define USB_DEVICE_CDC_DATA(vid, pid) \
	USB_DEVICE_ID_MATCH_DEVICE |\
	USB_DEVICE_ID_MATCH_INT_CLASS |\
	USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
	.bInterfaceClass = USB_CLASS_COMM,\
	.bInterfaceSubClass = USB_CDC_SUBCLASS_ACM
/*
 * Devices handled by this mux driver.
 * NOTE(review): the empty terminator entry and the closing `};` (source
 * lines 49-52) are not visible in this chunk.
 */
static const struct usb_device_id id_table[] = {
	{ USB_DEVICE_CDC_DATA(0x1076, 0x8000) }, /* GCT GDM7240 */
	{ USB_DEVICE_CDC_DATA(0x1076, 0x8f00) }, /* GCT GDM7243 */
	{ USB_DEVICE_CDC_DATA(0x1076, 0x9000) }, /* GCT GDM7243 */
	{ USB_DEVICE_CDC_DATA(0x1d74, 0x2300) }, /* LGIT Phoenix */

MODULE_DEVICE_TABLE(usb, id_table);
/*
 * Map an on-the-wire mux packet_type value to its TTY index by linear scan
 * of the packet_type[] table.
 * NOTE(review): the opening brace, the `int i;` declaration and the return
 * statements (source lines 56-58 and 61-66) are not visible in this chunk;
 * presumably it returns the matching index, or a negative value when the
 * type is unknown -- confirm against the full file.
 */
int packet_type_to_index(u16 packetType)
	for (i = 0; i < TTY_MAX_COUNT; i++) {
		if (packet_type[i] == packetType)
/*
 * Allocate a mux TX descriptor plus its URB and transfer buffer.  All
 * allocations use GFP_ATOMIC, so this is callable under spinlocks.  `len`
 * is accepted but the visible lines allocate a fixed MUX_TX_MAX_SIZE buffer.
 * NOTE(review): the NULL check on `t`, the error-cleanup path and the
 * return statements (source lines 70-86) are not visible in this chunk.
 */
static struct mux_tx *alloc_mux_tx(int len)
	struct mux_tx *t = NULL;

	t = kzalloc(sizeof(struct mux_tx), GFP_ATOMIC);

	t->urb = usb_alloc_urb(0, GFP_ATOMIC);
	t->buf = kmalloc(MUX_TX_MAX_SIZE, GFP_ATOMIC);
	if (!t->urb || !t->buf) {
/*
 * Release a mux TX descriptor.
 * NOTE(review): the body (source lines 88-94) is not visible in this chunk;
 * presumably it frees t->buf, t->urb and t itself -- confirm against the
 * full file.
 */
static void free_mux_tx(struct mux_tx *t)
/*
 * Allocate a mux RX descriptor plus its URB and MUX_RX_MAX_SIZE receive
 * buffer (GFP_ATOMIC throughout).  The visible error branch releases the
 * URB when either sub-allocation failed.
 * NOTE(review): the NULL check on `r`, the rest of the error-cleanup path
 * and the return statements (source lines 101-103 and 108-114) are not
 * visible in this chunk.
 */
static struct mux_rx *alloc_mux_rx(void)
	struct mux_rx *r = NULL;

	r = kzalloc(sizeof(struct mux_rx), GFP_ATOMIC);

	r->urb = usb_alloc_urb(0, GFP_ATOMIC);
	r->buf = kmalloc(MUX_RX_MAX_SIZE, GFP_ATOMIC);
	if (!r->urb || !r->buf) {
		usb_free_urb(r->urb);
/*
 * Release a mux RX descriptor's URB.
 * NOTE(review): the surrounding lines (source 117-118 and 120-123,
 * presumably freeing r->buf and r itself) are not visible in this chunk.
 */
static void free_mux_rx(struct mux_rx *r)
	usb_free_urb(r->urb);
/*
 * Pop one mux_rx descriptor from the tail of rx->rx_free_list under
 * rx->free_list_lock; an empty list takes the (partially visible)
 * unlock-and-bail branch.
 * NOTE(review): the declarations of `r`/`flags` and the return statements
 * are on lines missing from this chunk.
 */
static struct mux_rx *get_rx_struct(struct rx_cxt *rx)
	spin_lock_irqsave(&rx->free_list_lock, flags);

	if (list_empty(&rx->rx_free_list)) {
		spin_unlock_irqrestore(&rx->free_list_lock, flags);

	/* Take the entry at the list tail and unlink it. */
	r = list_entry(rx->rx_free_list.prev, struct mux_rx, free_list);
	list_del(&r->free_list);

	spin_unlock_irqrestore(&rx->free_list_lock, flags);
/*
 * Return a mux_rx descriptor to the free list (irq-safe).
 * NOTE(review): the function braces and the `unsigned long flags;`
 * declaration are on lines missing from this chunk.
 */
static void put_rx_struct(struct rx_cxt *rx, struct mux_rx *r)
	spin_lock_irqsave(&rx->free_list_lock, flags);
	list_add_tail(&r->free_list, &rx->rx_free_list);
	spin_unlock_irqrestore(&rx->free_list_lock, flags);
/*
 * Parse the mux packets accumulated in r->buf (starting at r->offset) and
 * hand each payload to the registered callback for the matching TTY minor.
 * Returns a TO_HOST_* status; TO_HOST_BUFFER_REQUEST_FAIL records the
 * resume position in r->offset so the caller can retry later.
 * NOTE(review): many lines are missing from this chunk -- the declarations
 * of index/len/remain/dummy_cnt, the loop construct that iterates over
 * concatenated packets, most braces and the early-exit/return statements,
 * and some callback arguments.  Comments below cover only the visible
 * fragments.
 */
static int up_to_host(struct mux_rx *r)
	struct mux_dev *mux_dev = (struct mux_dev *)r->mux_dev;
	struct mux_pkt_header *mux_header;
	unsigned int start_flag;
	unsigned int payload_size;
	unsigned short packet_type;
	u32 packet_size_sum = r->offset;
	int ret = TO_HOST_INVALID_PACKET;

	/* Header of the next packet in the receive buffer (LE fields). */
	mux_header = (struct mux_pkt_header *)(r->buf + packet_size_sum);
	start_flag = __le32_to_cpu(mux_header->start_flag);
	payload_size = __le32_to_cpu(mux_header->payload_size);
	packet_type = __le16_to_cpu(mux_header->packet_type);

	if (start_flag != START_FLAG) {
		pr_err("invalid START_FLAG %x\n", start_flag);

	/* Packets are padded out to a 4-byte boundary. */
	remain = (MUX_HEADER_SIZE + payload_size) % 4;
	dummy_cnt = remain ? (4-remain) : 0;

	/* Reject a payload that overruns the received data. */
	if (len - packet_size_sum <
	    MUX_HEADER_SIZE + payload_size + dummy_cnt) {
		pr_err("invalid payload : %d %d %04x\n",
		       payload_size, len, packet_type);

	index = packet_type_to_index(packet_type);
		pr_err("invalid index %d\n", index);

	/* Push the payload up; CONTINUE = more packets may follow. */
	ret = r->callback(mux_header->data,
			  mux_dev->minor[index],
			  RECV_PACKET_PROCESS_CONTINUE
	if (ret == TO_HOST_BUFFER_REQUEST_FAIL) {
		/* Consumer has no room: remember where to resume. */
		r->offset += packet_size_sum;

	packet_size_sum += MUX_HEADER_SIZE + payload_size + dummy_cnt;
	/* Less than a header (+2) remaining: signal completion upward. */
	if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
		ret = r->callback(NULL,
				  mux_dev->minor[index],
				  RECV_PACKET_PROCESS_COMPLETE
/*
 * Workqueue handler: drain rx->to_host_list FIFO-style, pushing each
 * completed mux_rx up to the host and recycling the descriptor.
 * NOTE(review): the loop construct, the declarations of r/ret/flags and
 * the call that produces `ret` (presumably up_to_host(r)) are on lines
 * missing from this chunk.
 */
static void do_rx(struct work_struct *work)
	struct mux_dev *mux_dev =
		container_of(work, struct mux_dev, work_rx.work);
	struct rx_cxt *rx = (struct rx_cxt *)&mux_dev->rx;

	spin_lock_irqsave(&rx->to_host_lock, flags);
	if (list_empty(&rx->to_host_list)) {
		spin_unlock_irqrestore(&rx->to_host_lock, flags);

	/* Oldest completed receive first. */
	r = list_entry(rx->to_host_list.next, struct mux_rx, to_host_list);
	list_del(&r->to_host_list);
	spin_unlock_irqrestore(&rx->to_host_lock, flags);

	if (ret == TO_HOST_BUFFER_REQUEST_FAIL)
		pr_err("failed to send mux data to host\n");

	put_rx_struct(rx, r);
/*
 * Unlink `r` from rx->rx_submit_list under submit_list_lock.
 * NOTE(review): source line 256 (presumably an `if (r == r_remove)` guard)
 * is missing from this chunk -- as literally shown, list_del(&r->...)
 * would run on every iteration; confirm against the full file.
 */
static void remove_rx_submit_list(struct mux_rx *r, struct rx_cxt *rx)
	struct mux_rx *r_remove, *r_remove_next;

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r_remove, r_remove_next,
				 &rx->rx_submit_list, rx_submit_list) {
		list_del(&r->rx_submit_list);
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);
/*
 * Bulk-in URB completion handler: on success, record the received length,
 * queue the descriptor on rx->to_host_list and kick the mux RX workqueue;
 * on URB error, recycle the descriptor (logging unless PM state says we
 * are suspending/disconnecting).
 * NOTE(review): the `if (urb->status) { ... } else { ... }` framing and
 * the `flags` declaration are on lines missing from this chunk; the two
 * halves below are the error branch and the success branch respectively.
 */
static void gdm_mux_rcv_complete(struct urb *urb)
	struct mux_rx *r = urb->context;
	struct mux_dev *mux_dev = (struct mux_dev *)r->mux_dev;
	struct rx_cxt *rx = &mux_dev->rx;

	/* URB is no longer in flight. */
	remove_rx_submit_list(r, rx);

		if (mux_dev->usb_state == PM_NORMAL)
			pr_err("%s: urb status error %d\n",
			       __func__, urb->status);
		put_rx_struct(rx, r);

		r->len = r->urb->actual_length;
		spin_lock_irqsave(&rx->to_host_lock, flags);
		list_add_tail(&r->to_host_list, &rx->to_host_list);
		queue_work(mux_rx_wq, &mux_dev->work_rx.work);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);
/*
 * Queue one bulk-in URB (endpoint 0x86) to receive mux data; completion is
 * handled by gdm_mux_rcv_complete().  On submit failure the descriptor is
 * removed from the submit list and recycled.
 * NOTE(review): the connect-state and NULL checks around the visible
 * pr_err() calls, the remaining usb_fill_bulk_urb() arguments (buffer,
 * length, context) and the return statements are on lines missing from
 * this chunk.
 */
static int gdm_mux_recv(void *priv_dev,
			int (*cb)(void *data, int len, int tty_index,
				  int minor, int complete)
	struct mux_dev *mux_dev = priv_dev;
	struct usb_device *usbdev = mux_dev->usbdev;
	struct rx_cxt *rx = &mux_dev->rx;

		pr_err("device is disconnected\n");

	r = get_rx_struct(rx);
		pr_err("get_rx_struct fail\n");

	r->mux_dev = (void *)mux_dev;

	usb_fill_bulk_urb(r->urb,
			  usb_rcvbulkpipe(usbdev, 0x86),
			  gdm_mux_rcv_complete,

	/* Track in-flight URBs so suspend/disconnect can kill them. */
	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	ret = usb_submit_urb(r->urb, GFP_KERNEL);

		/* Submit failed: undo the bookkeeping and recycle. */
		spin_lock_irqsave(&rx->submit_list_lock, flags);
		list_del(&r->rx_submit_list);
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);

		put_rx_struct(rx, r);
		pr_err("usb_submit_urb ret=%d\n", ret);

	usb_mark_last_busy(usbdev);
/*
 * Bulk-out URB completion: -ECONNRESET (URB unlinked) is just logged;
 * otherwise the sender's completion callback is invoked with its cb_data.
 * NOTE(review): the free_mux_tx() calls, the early return in the reset
 * branch and the NULL check on t->callback (source lines 347-356) are not
 * visible in this chunk.
 */
static void gdm_mux_send_complete(struct urb *urb)
	struct mux_tx *t = urb->context;

	if (urb->status == -ECONNRESET) {
		pr_info("CONNRESET\n");

		t->callback(t->cb_data);
/*
 * Build one mux packet (little-endian header + payload, zero-padded to a
 * 4-byte boundary) and submit it on bulk-out endpoint 5 under
 * mux_dev->write_lock; `cb(cb_data)` fires from gdm_mux_send_complete().
 * When the device is autosuspended the interface is woken first.
 * `seq_num` is function-static, so it is shared across all devices.
 * NOTE(review): the declarations of ret/flags/remain/dummy_cnt/total_len,
 * the NULL checks after alloc_mux_tx()/autopm, the remaining
 * usb_fill_bulk_urb() arguments, t->len/t->callback assignments and the
 * return statements are on lines missing from this chunk.
 */
static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
			void (*cb)(void *data), void *cb_data)
	struct mux_dev *mux_dev = priv_dev;
	struct usb_device *usbdev = mux_dev->usbdev;
	struct mux_pkt_header *mux_header;
	struct mux_tx *t = NULL;
	static u32 seq_num = 1;

	if (mux_dev->usb_state == PM_SUSPEND) {
		/* Wake the interface before touching the hardware. */
		ret = usb_autopm_get_interface(mux_dev->intf);
			usb_autopm_put_interface(mux_dev->intf);

	spin_lock_irqsave(&mux_dev->write_lock, flags);

	/* Pad the packet out to a 4-byte boundary. */
	remain = (MUX_HEADER_SIZE + len) % 4;
	dummy_cnt = remain ? (4 - remain) : 0;

	total_len = len + MUX_HEADER_SIZE + dummy_cnt;

	t = alloc_mux_tx(total_len);
		pr_err("alloc_mux_tx fail\n");
		spin_unlock_irqrestore(&mux_dev->write_lock, flags);

	/* Fill the on-the-wire header (little-endian fields). */
	mux_header = (struct mux_pkt_header *)t->buf;
	mux_header->start_flag = __cpu_to_le32(START_FLAG);
	mux_header->seq_num = __cpu_to_le32(seq_num++);
	mux_header->payload_size = __cpu_to_le32((u32)len);
	mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]);

	memcpy(t->buf+MUX_HEADER_SIZE, data, len);
	memset(t->buf+MUX_HEADER_SIZE+len, 0, dummy_cnt);

	t->cb_data = cb_data;

	usb_fill_bulk_urb(t->urb,
			  usb_sndbulkpipe(usbdev, 5),
			  gdm_mux_send_complete,

	ret = usb_submit_urb(t->urb, GFP_ATOMIC);

	spin_unlock_irqrestore(&mux_dev->write_lock, flags);

		pr_err("usb_submit_urb Error: %d\n", ret);

	usb_mark_last_busy(usbdev);
/*
 * Synchronous control request on endpoint 0.  Returns 0 on success or the
 * negative usb_control_msg() error code.
 * NOTE(review): the remaining usb_control_msg() arguments (request, value,
 * request-type, buf, len, timeout), the `int ret;` declaration and the
 * failure check preceding the pr_err() are on lines missing from this
 * chunk.
 */
static int gdm_mux_send_control(void *priv_dev, int request, int value,
				void *buf, int len)
	struct mux_dev *mux_dev = priv_dev;
	struct usb_device *usbdev = mux_dev->usbdev;

	ret = usb_control_msg(usbdev,
			      usb_sndctrlpipe(usbdev, 0),

		pr_err("usb_control_msg error: %d\n", ret);

	return ret < 0 ? ret : 0;
/*
 * Tear down RX state for one device: cancel deferred RX work, kill every
 * in-flight URB, then drain the free list and this device's entries on the
 * shared to-host list.  submit_list_lock is dropped around usb_kill_urb()
 * because killing a URB can sleep and its completion takes the same lock.
 * NOTE(review): the `unsigned long flags;` declaration, the free_mux_rx()
 * calls inside the drain loops and several closing braces are on lines
 * missing from this chunk.
 */
static void release_usb(struct mux_dev *mux_dev)
	struct rx_cxt *rx = &mux_dev->rx;
	struct mux_rx *r, *r_next;

	cancel_delayed_work(&mux_dev->work_rx);

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		/* usb_kill_urb() can sleep: drop the lock around it. */
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_list_lock, flags);
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	spin_lock_irqsave(&rx->free_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_free_list, free_list) {
		list_del(&r->free_list);
	spin_unlock_irqrestore(&rx->free_list_lock, flags);

	spin_lock_irqsave(&rx->to_host_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
		/* Only drop entries that belong to this device. */
		if (r->mux_dev == (void *)mux_dev) {
			list_del(&r->to_host_list);
	spin_unlock_irqrestore(&rx->to_host_lock, flags);
/*
 * Initialise per-device RX state: the write lock, the three RX lists and
 * their locks, a preallocated pool of MAX_ISSUE_NUM * 2 mux_rx descriptors,
 * and the deferred RX work item (do_rx).
 * NOTE(review): the `int i`/`r`/`ret` declarations, the alloc_mux_rx()
 * call and its error handling inside the loop, and the return statements
 * are on lines missing from this chunk.
 */
static int init_usb(struct mux_dev *mux_dev)
	struct rx_cxt *rx = &mux_dev->rx;

	spin_lock_init(&mux_dev->write_lock);
	INIT_LIST_HEAD(&rx->to_host_list);
	INIT_LIST_HEAD(&rx->rx_submit_list);
	INIT_LIST_HEAD(&rx->rx_free_list);
	spin_lock_init(&rx->to_host_lock);
	spin_lock_init(&rx->submit_list_lock);
	spin_lock_init(&rx->free_list_lock);

	for (i = 0; i < MAX_ISSUE_NUM * 2; i++) {
		list_add(&r->free_list, &rx->rx_free_list);

	INIT_DELAYED_WORK(&mux_dev->work_rx, do_rx);
/*
 * Probe: bind only interface #2 of a matched device, allocate the mux_dev
 * and tty_dev state, initialise USB/RX state, register the LTE TTY device,
 * record the per-port TTY minors, and stash tty_dev as interface data for
 * disconnect/PM callbacks.
 * NOTE(review): this chunk is missing the allocation-failure checks, the
 * error-path labels/returns (release_usb() below appears to belong to an
 * error label), the `int ret`/`int i` declarations and the final return;
 * the failure branch after register_lte_tty_device() is only partially
 * visible.
 */
static int gdm_mux_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
	struct mux_dev *mux_dev = NULL;
	struct tty_dev *tty_dev = NULL;
	u16 idVendor, idProduct;
	int bInterfaceNumber;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;

	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	pr_info("mux vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

	/* The mux function lives on interface 2 only. */
	if (bInterfaceNumber != 2) {

	mux_dev = kzalloc(sizeof(struct mux_dev), GFP_KERNEL);

	tty_dev = kzalloc(sizeof(struct tty_dev), GFP_KERNEL);

	mux_dev->usbdev = usbdev;
	mux_dev->control_intf = intf;

	ret = init_usb(mux_dev);

	/* Wire the mux send/receive/control ops into the TTY layer. */
	tty_dev->priv_dev = (void *)mux_dev;
	tty_dev->send_func = gdm_mux_send;
	tty_dev->recv_func = gdm_mux_recv;
	tty_dev->send_control = gdm_mux_send_control;

	if (register_lte_tty_device(tty_dev, &intf->dev) < 0) {
		unregister_lte_tty_device(tty_dev);
		mux_dev = tty_dev->priv_dev;

	for (i = 0; i < TTY_MAX_COUNT; i++)
		mux_dev->minor[i] = tty_dev->minor[i];

	/* NOTE(review): likely part of an error-cleanup label. */
	release_usb(mux_dev);

	mux_dev->intf = intf;
	mux_dev->usb_state = PM_NORMAL;

	usb_set_intfdata(intf, tty_dev);
/*
 * Disconnect: fetch tty_dev from the interface data, release RX resources
 * and unregister the LTE TTY device.
 * NOTE(review): the trailing lines (source 597-602, presumably freeing
 * tty_dev/mux_dev) are not visible in this chunk; `usbdev` has no visible
 * use here.
 */
static void gdm_mux_disconnect(struct usb_interface *intf)
	struct tty_dev *tty_dev;
	struct mux_dev *mux_dev;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	tty_dev = usb_get_intfdata(intf);

	mux_dev = tty_dev->priv_dev;

	release_usb(mux_dev);
	unregister_lte_tty_device(tty_dev);
/*
 * PM suspend: refuse unless the device is in PM_NORMAL, switch to
 * PM_SUSPEND, then kill every in-flight RX URB (submit_list_lock is
 * dropped around the sleeping usb_kill_urb(), same pattern as
 * release_usb()).
 * NOTE(review): the `rx`/`flags` declarations and their initialisation,
 * plus the return statements, are on lines missing from this chunk.
 */
static int gdm_mux_suspend(struct usb_interface *intf, pm_message_t pm_msg)
	struct tty_dev *tty_dev;
	struct mux_dev *mux_dev;
	struct mux_rx *r, *r_next;

	tty_dev = usb_get_intfdata(intf);
	mux_dev = tty_dev->priv_dev;

	if (mux_dev->usb_state != PM_NORMAL) {
		pr_err("usb suspend - invalid state\n");

	mux_dev->usb_state = PM_SUSPEND;

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_list_lock, flags);
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);
/*
 * PM resume (also used for reset_resume): require PM_SUSPEND, return the
 * device to PM_NORMAL, then re-queue MAX_ISSUE_NUM receive URBs with the
 * saved rx callback.
 * NOTE(review): the `int i`/`u8` declarations and the return statements
 * are on lines missing from this chunk.
 */
static int gdm_mux_resume(struct usb_interface *intf)
	struct tty_dev *tty_dev;
	struct mux_dev *mux_dev;

	tty_dev = usb_get_intfdata(intf);
	mux_dev = tty_dev->priv_dev;

	if (mux_dev->usb_state != PM_SUSPEND) {
		pr_err("usb resume - invalid state\n");

	mux_dev->usb_state = PM_NORMAL;

	for (i = 0; i < MAX_ISSUE_NUM; i++)
		gdm_mux_recv(mux_dev, mux_dev->rx_cb);
/*
 * USB driver glue.  Autosuspend is supported; reset_resume reuses the
 * normal resume path.
 * NOTE(review): the `.name` initializer (source line 658) and the closing
 * `};` are not visible in this chunk.
 */
static struct usb_driver gdm_mux_driver = {
	.probe = gdm_mux_probe,
	.disconnect = gdm_mux_disconnect,
	.id_table = id_table,
	.supports_autosuspend = 1,
	.suspend = gdm_mux_suspend,
	.resume = gdm_mux_resume,
	.reset_resume = gdm_mux_resume,
/*
 * Module init: create the dedicated RX workqueue, register the LTE TTY
 * driver, then register this USB driver (its result is the module's init
 * status).
 * NOTE(review): the error return inside the workqueue-failure branch
 * (source lines 674-675) is not visible in this chunk.
 */
static int __init gdm_usb_mux_init(void)
	mux_rx_wq = create_workqueue("mux_rx_wq");
	if (mux_rx_wq == NULL) {
		pr_err("work queue create fail\n");

	register_lte_tty_driver();

	return usb_register(&gdm_mux_driver);
/*
 * Module exit: unregister the TTY driver, flush and destroy the RX
 * workqueue, then deregister the USB driver.
 * NOTE(review): a guard around the workqueue teardown (source lines
 * 685-686, presumably `if (mux_rx_wq) {`) is not visible in this chunk.
 */
static void __exit gdm_usb_mux_exit(void)
	unregister_lte_tty_driver();

	flush_workqueue(mux_rx_wq);
	destroy_workqueue(mux_rx_wq);

	usb_deregister(&gdm_mux_driver);
/* Module entry points and metadata. */
module_init(gdm_usb_mux_init);
module_exit(gdm_usb_mux_exit);

MODULE_DESCRIPTION("GCT LTE TTY Device Driver");
MODULE_LICENSE("GPL");