/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/usb/cdc.h>

#include "gdm_mux.h"
#include "gdm_tty.h"

struct workqueue_struct *mux_rx_wq;

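/*
 * MUX packet type carried in the header of every multiplexed packet; the
 * index into this array is the tty_index used throughout the driver
 * (0xF011 and 0xF010 appear to identify the two multiplexed TTY channels).
 */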
static u16 packet_type[TTY_MAX_COUNT] = {0xF011, 0xF010};

#define USB_DEVICE_CDC_DATA(vid, pid) \
        .match_flags = \
                USB_DEVICE_ID_MATCH_DEVICE |\
                USB_DEVICE_ID_MATCH_INT_CLASS |\
                USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
        .idVendor = vid,\
        .idProduct = pid,\
        .bInterfaceClass = USB_CLASS_COMM,\
        .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM

static const struct usb_device_id id_table[] = {
        { USB_DEVICE_CDC_DATA(0x1076, 0x8000) }, /* GCT GDM7240 */
        { USB_DEVICE_CDC_DATA(0x1076, 0x8f00) }, /* GCT GDM7243 */
        { USB_DEVICE_CDC_DATA(0x1076, 0x9000) }, /* GCT GDM7243 */
        { USB_DEVICE_CDC_DATA(0x1d74, 0x2300) }, /* LGIT Phoenix */
        {}
};

MODULE_DEVICE_TABLE(usb, id_table);

int packet_type_to_index(u16 packetType)
{
        int i;

        for (i = 0; i < TTY_MAX_COUNT; i++) {
                if (packet_type[i] == packetType)
                        return i;
        }

        return -1;
}

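/*
 * Allocation helpers for the TX and RX descriptors. Each descriptor owns a
 * URB plus a transfer buffer; a partial allocation failure releases whatever
 * was already allocated. GFP_ATOMIC is used because alloc_mux_tx() is called
 * with the write_lock spinlock held.
 */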
static struct mux_tx *alloc_mux_tx(int len)
{
        struct mux_tx *t = NULL;

        t = kzalloc(sizeof(struct mux_tx), GFP_ATOMIC);
        if (!t)
                return NULL;

        t->urb = usb_alloc_urb(0, GFP_ATOMIC);
        t->buf = kmalloc(MUX_TX_MAX_SIZE, GFP_ATOMIC);
        if (!t->urb || !t->buf) {
                usb_free_urb(t->urb);
                kfree(t->buf);
                kfree(t);
                return NULL;
        }

        return t;
}

static void free_mux_tx(struct mux_tx *t)
{
        if (t) {
                usb_free_urb(t->urb);
                kfree(t->buf);
                kfree(t);
        }
}

static struct mux_rx *alloc_mux_rx(void)
{
        struct mux_rx *r = NULL;

        r = kzalloc(sizeof(struct mux_rx), GFP_ATOMIC);
        if (!r)
                return NULL;

        r->urb = usb_alloc_urb(0, GFP_ATOMIC);
        r->buf = kmalloc(MUX_RX_MAX_SIZE, GFP_ATOMIC);
        if (!r->urb || !r->buf) {
                usb_free_urb(r->urb);
                kfree(r->buf);
                kfree(r);
                return NULL;
        }

        return r;
}

static void free_mux_rx(struct mux_rx *r)
{
        if (r) {
                usb_free_urb(r->urb);
                kfree(r->buf);
                kfree(r);
        }
}

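/*
 * The rx context keeps a pool of pre-allocated mux_rx descriptors on
 * rx_free_list; get_rx_struct()/put_rx_struct() take and return entries
 * under free_list_lock.
 */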
static struct mux_rx *get_rx_struct(struct rx_cxt *rx)
{
        struct mux_rx *r;
        unsigned long flags;

        spin_lock_irqsave(&rx->free_list_lock, flags);

        if (list_empty(&rx->rx_free_list)) {
                spin_unlock_irqrestore(&rx->free_list_lock, flags);
                return NULL;
        }

        r = list_entry(rx->rx_free_list.prev, struct mux_rx, free_list);
        list_del(&r->free_list);

        spin_unlock_irqrestore(&rx->free_list_lock, flags);

        return r;
}

static void put_rx_struct(struct rx_cxt *rx, struct mux_rx *r)
{
        unsigned long flags;

        spin_lock_irqsave(&rx->free_list_lock, flags);
        list_add_tail(&r->free_list, &rx->rx_free_list);
        spin_unlock_irqrestore(&rx->free_list_lock, flags);
}

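/*
 * Parse a received USB buffer and push each mux packet up to the TTY layer.
 * A packet starts with a mux_pkt_header (START_FLAG, sequence number,
 * payload_size, packet_type) and is zero-padded so that header plus payload
 * is a multiple of 4 bytes. Parsing stops on a malformed header, when too
 * little data remains for another packet, or when the TTY callback returns
 * TO_HOST_BUFFER_REQUEST_FAIL (r->offset then records how far parsing got).
 */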
static int up_to_host(struct mux_rx *r)
{
        struct mux_dev *mux_dev = (struct mux_dev *)r->mux_dev;
        struct mux_pkt_header *mux_header;
        unsigned int start_flag;
        unsigned int payload_size;
        unsigned short packet_type;
        int remain;
        int dummy_cnt;
        u32 packet_size_sum = r->offset;
        int index;
        int ret = TO_HOST_INVALID_PACKET;
        int len = r->len;

        while (1) {
                mux_header = (struct mux_pkt_header *)(r->buf + packet_size_sum);
                start_flag = __le32_to_cpu(mux_header->start_flag);
                payload_size = __le32_to_cpu(mux_header->payload_size);
                packet_type = __le16_to_cpu(mux_header->packet_type);

                if (start_flag != START_FLAG) {
                        pr_err("invalid START_FLAG %x\n", start_flag);
                        break;
                }

                remain = (MUX_HEADER_SIZE + payload_size) % 4;
                dummy_cnt = remain ? (4 - remain) : 0;

                if (len - packet_size_sum <
                    MUX_HEADER_SIZE + payload_size + dummy_cnt) {
                        pr_err("invalid payload : %d %d %04x\n",
                               payload_size, len, packet_type);
                        break;
                }

                index = packet_type_to_index(packet_type);
                if (index < 0) {
                        pr_err("invalid index %d\n", index);
                        break;
                }

                ret = r->callback(mux_header->data,
                                  payload_size,
                                  index,
                                  mux_dev->minor[index],
                                  RECV_PACKET_PROCESS_CONTINUE);
                if (ret == TO_HOST_BUFFER_REQUEST_FAIL) {
                        r->offset += packet_size_sum;
                        break;
                }

                packet_size_sum += MUX_HEADER_SIZE + payload_size + dummy_cnt;
                if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
                        ret = r->callback(NULL,
                                          0,
                                          index,
                                          mux_dev->minor[index],
                                          RECV_PACKET_PROCESS_COMPLETE);
                        break;
                }
        }

        return ret;
}

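/*
 * Work handler: drain rx->to_host_list and hand each completed URB buffer
 * to up_to_host(). Descriptors go back to the free pool unless the TTY
 * layer could not take the data.
 */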
static void do_rx(struct work_struct *work)
{
        struct mux_dev *mux_dev =
                container_of(work, struct mux_dev, work_rx.work);
        struct mux_rx *r;
        struct rx_cxt *rx = (struct rx_cxt *)&mux_dev->rx;
        unsigned long flags;
        int ret = 0;

        while (1) {
                spin_lock_irqsave(&rx->to_host_lock, flags);
                if (list_empty(&rx->to_host_list)) {
                        spin_unlock_irqrestore(&rx->to_host_lock, flags);
                        break;
                }
                r = list_entry(rx->to_host_list.next, struct mux_rx,
                               to_host_list);
                list_del(&r->to_host_list);
                spin_unlock_irqrestore(&rx->to_host_lock, flags);

                ret = up_to_host(r);
                if (ret == TO_HOST_BUFFER_REQUEST_FAIL)
                        pr_err("failed to send mux data to host\n");
                else
                        put_rx_struct(rx, r);
        }
}

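/*
 * Remove @r from the submit list if it is still on it; called from the URB
 * completion handler under submit_list_lock.
 */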
static void remove_rx_submit_list(struct mux_rx *r, struct rx_cxt *rx)
{
        unsigned long flags;
        struct mux_rx *r_remove, *r_remove_next;

        spin_lock_irqsave(&rx->submit_list_lock, flags);
        list_for_each_entry_safe(r_remove, r_remove_next,
                                 &rx->rx_submit_list, rx_submit_list) {
                if (r == r_remove)
                        list_del(&r->rx_submit_list);
        }
        spin_unlock_irqrestore(&rx->submit_list_lock, flags);
}

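/*
 * Bulk-in completion handler: on success the buffer is queued on
 * to_host_list and the rx work is scheduled; on error the descriptor goes
 * straight back to the free pool.
 */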
static void gdm_mux_rcv_complete(struct urb *urb)
{
        struct mux_rx *r = urb->context;
        struct mux_dev *mux_dev = (struct mux_dev *)r->mux_dev;
        struct rx_cxt *rx = &mux_dev->rx;
        unsigned long flags;

        remove_rx_submit_list(r, rx);

        if (urb->status) {
                if (mux_dev->usb_state == PM_NORMAL)
                        pr_err("%s: urb status error %d\n",
                               __func__, urb->status);
                put_rx_struct(rx, r);
        } else {
                r->len = r->urb->actual_length;
                spin_lock_irqsave(&rx->to_host_lock, flags);
                list_add_tail(&r->to_host_list, &rx->to_host_list);
                queue_work(mux_rx_wq, &mux_dev->work_rx.work);
                spin_unlock_irqrestore(&rx->to_host_lock, flags);
        }
}

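/*
 * Queue one bulk-in read. The receive callback @cb is invoked from the rx
 * work once data arrives; the bulk-in endpoint address (0x86) is hard-coded
 * here.
 */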
static int gdm_mux_recv(void *priv_dev,
                        int (*cb)(void *data, int len, int tty_index,
                                  int minor, int complete))
{
        struct mux_dev *mux_dev = priv_dev;
        struct usb_device *usbdev = mux_dev->usbdev;
        struct mux_rx *r;
        struct rx_cxt *rx = &mux_dev->rx;
        unsigned long flags;
        int ret;

        if (!usbdev) {
                pr_err("device is disconnected\n");
                return -ENODEV;
        }

        r = get_rx_struct(rx);
        if (!r) {
                pr_err("get_rx_struct fail\n");
                return -ENOMEM;
        }

        r->offset = 0;
        r->mux_dev = (void *)mux_dev;
        r->callback = cb;
        mux_dev->rx_cb = cb;

        usb_fill_bulk_urb(r->urb,
                          usbdev,
                          usb_rcvbulkpipe(usbdev, 0x86),
                          r->buf,
                          MUX_RX_MAX_SIZE,
                          gdm_mux_rcv_complete,
                          r);

        spin_lock_irqsave(&rx->submit_list_lock, flags);
        list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
        spin_unlock_irqrestore(&rx->submit_list_lock, flags);

        ret = usb_submit_urb(r->urb, GFP_KERNEL);

        if (ret) {
                spin_lock_irqsave(&rx->submit_list_lock, flags);
                list_del(&r->rx_submit_list);
                spin_unlock_irqrestore(&rx->submit_list_lock, flags);

                put_rx_struct(rx, r);

                pr_err("usb_submit_urb ret=%d\n", ret);
        }

        usb_mark_last_busy(usbdev);

        return ret;
}

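/*
 * Bulk-out completion: run the caller's completion callback, then free the
 * TX descriptor. A -ECONNRESET status (URB killed) skips the callback.
 */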
static void gdm_mux_send_complete(struct urb *urb)
{
        struct mux_tx *t = urb->context;

        if (urb->status == -ECONNRESET) {
                pr_info("CONNRESET\n");
                free_mux_tx(t);
                return;
        }

        if (t->callback)
                t->callback(t->cb_data);

        free_mux_tx(t);
}

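/*
 * Build a mux packet around @data (header, payload, zero padding to a
 * 4-byte boundary) and submit it on the bulk-out endpoint (hard-coded as
 * endpoint 5). If the link is autosuspended, a get/put on the interface is
 * done first to bring the device back up before transmitting.
 */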
static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
                        void (*cb)(void *data), void *cb_data)
{
        struct mux_dev *mux_dev = priv_dev;
        struct usb_device *usbdev = mux_dev->usbdev;
        struct mux_pkt_header *mux_header;
        struct mux_tx *t = NULL;
        static u32 seq_num = 1;
        int remain;
        int dummy_cnt;
        int total_len;
        int ret;
        unsigned long flags;

        if (mux_dev->usb_state == PM_SUSPEND) {
                ret = usb_autopm_get_interface(mux_dev->intf);
                if (!ret)
                        usb_autopm_put_interface(mux_dev->intf);
        }

        spin_lock_irqsave(&mux_dev->write_lock, flags);

        remain = (MUX_HEADER_SIZE + len) % 4;
        dummy_cnt = remain ? (4 - remain) : 0;

        total_len = len + MUX_HEADER_SIZE + dummy_cnt;

        t = alloc_mux_tx(total_len);
        if (!t) {
                pr_err("alloc_mux_tx fail\n");
                spin_unlock_irqrestore(&mux_dev->write_lock, flags);
                return -ENOMEM;
        }

        mux_header = (struct mux_pkt_header *)t->buf;
        mux_header->start_flag = __cpu_to_le32(START_FLAG);
        mux_header->seq_num = __cpu_to_le32(seq_num++);
        mux_header->payload_size = __cpu_to_le32((u32)len);
        mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]);

        memcpy(t->buf + MUX_HEADER_SIZE, data, len);
        memset(t->buf + MUX_HEADER_SIZE + len, 0, dummy_cnt);

        t->len = total_len;
        t->callback = cb;
        t->cb_data = cb_data;

        usb_fill_bulk_urb(t->urb,
                          usbdev,
                          usb_sndbulkpipe(usbdev, 5),
                          t->buf,
                          total_len,
                          gdm_mux_send_complete,
                          t);

        ret = usb_submit_urb(t->urb, GFP_ATOMIC);

        spin_unlock_irqrestore(&mux_dev->write_lock, flags);

        if (ret)
                pr_err("usb_submit_urb Error: %d\n", ret);

        usb_mark_last_busy(usbdev);

        return ret;
}

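/*
 * Issue a CDC ACM class control request (USB_RT_ACM) on endpoint 0; the
 * wIndex is hard-coded to interface 2, which is the interface this driver
 * binds to in gdm_mux_probe().
 */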
static int gdm_mux_send_control(void *priv_dev, int request, int value,
                                void *buf, int len)
{
        struct mux_dev *mux_dev = priv_dev;
        struct usb_device *usbdev = mux_dev->usbdev;
        int ret;

        ret = usb_control_msg(usbdev,
                              usb_sndctrlpipe(usbdev, 0),
                              request,
                              USB_RT_ACM,
                              value,
                              2,
                              buf,
                              len,
                              5000);

        if (ret < 0)
                pr_err("usb_control_msg error: %d\n", ret);

        return ret < 0 ? ret : 0;
}

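/*
 * Tear down all receive state: cancel the rx work, kill any URBs still in
 * flight (dropping the lock around usb_kill_urb(), which may sleep), and
 * free every descriptor on the free and to_host lists.
 */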
static void release_usb(struct mux_dev *mux_dev)
{
        struct rx_cxt *rx = &mux_dev->rx;
        struct mux_rx *r, *r_next;
        unsigned long flags;

        cancel_delayed_work(&mux_dev->work_rx);

        spin_lock_irqsave(&rx->submit_list_lock, flags);
        list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
                                 rx_submit_list) {
                spin_unlock_irqrestore(&rx->submit_list_lock, flags);
                usb_kill_urb(r->urb);
                spin_lock_irqsave(&rx->submit_list_lock, flags);
        }
        spin_unlock_irqrestore(&rx->submit_list_lock, flags);

        spin_lock_irqsave(&rx->free_list_lock, flags);
        list_for_each_entry_safe(r, r_next, &rx->rx_free_list, free_list) {
                list_del(&r->free_list);
                free_mux_rx(r);
        }
        spin_unlock_irqrestore(&rx->free_list_lock, flags);

        spin_lock_irqsave(&rx->to_host_lock, flags);
        list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
                if (r->mux_dev == (void *)mux_dev) {
                        list_del(&r->to_host_list);
                        free_mux_rx(r);
                }
        }
        spin_unlock_irqrestore(&rx->to_host_lock, flags);
}

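/*
 * Initialise the per-device locks and rx lists and pre-allocate the pool of
 * receive descriptors (MAX_ISSUE_NUM * 2 entries).
 */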
static int init_usb(struct mux_dev *mux_dev)
{
        struct mux_rx *r;
        struct rx_cxt *rx = &mux_dev->rx;
        int ret = 0;
        int i;

        spin_lock_init(&mux_dev->write_lock);
        INIT_LIST_HEAD(&rx->to_host_list);
        INIT_LIST_HEAD(&rx->rx_submit_list);
        INIT_LIST_HEAD(&rx->rx_free_list);
        spin_lock_init(&rx->to_host_lock);
        spin_lock_init(&rx->submit_list_lock);
        spin_lock_init(&rx->free_list_lock);

        for (i = 0; i < MAX_ISSUE_NUM * 2; i++) {
                r = alloc_mux_rx();
                if (r == NULL) {
                        ret = -ENOMEM;
                        break;
                }

                list_add(&r->free_list, &rx->rx_free_list);
        }

        INIT_DELAYED_WORK(&mux_dev->work_rx, do_rx);

        return ret;
}

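/*
 * Probe: only interface 2 of the matched CDC device is claimed. The mux_dev
 * and tty_dev contexts are allocated, the rx machinery initialised and the
 * LTE tty devices registered; the tty_dev is stored as interface data for
 * the other callbacks.
 */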
static int gdm_mux_probe(struct usb_interface *intf,
                         const struct usb_device_id *id)
{
        struct mux_dev *mux_dev = NULL;
        struct tty_dev *tty_dev = NULL;
        u16 idVendor, idProduct;
        int bInterfaceNumber;
        int ret = 0;
        int i;
        struct usb_device *usbdev = interface_to_usbdev(intf);

        bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;

        idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
        idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

        pr_info("mux vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

        if (bInterfaceNumber != 2) {
                ret = -ENODEV;
                goto out;
        }

        mux_dev = kzalloc(sizeof(struct mux_dev), GFP_KERNEL);
        if (!mux_dev) {
                ret = -ENOMEM;
                goto out;
        }

        tty_dev = kzalloc(sizeof(struct tty_dev), GFP_KERNEL);
        if (!tty_dev) {
                kfree(mux_dev);
                mux_dev = NULL; /* keep the error path below from freeing it again */
                ret = -ENOMEM;
                goto out;
        }

        mux_dev->usbdev = usbdev;
        mux_dev->control_intf = intf;

        ret = init_usb(mux_dev);
        if (ret < 0)
                goto out;

        tty_dev->priv_dev = (void *)mux_dev;
        tty_dev->send_func = gdm_mux_send;
        tty_dev->recv_func = gdm_mux_recv;
        tty_dev->send_control = gdm_mux_send_control;

        if (register_lte_tty_device(tty_dev, &intf->dev) < 0) {
                unregister_lte_tty_device(tty_dev);
                mux_dev = tty_dev->priv_dev;

                ret = -1;
                goto out;
        }
        for (i = 0; i < TTY_MAX_COUNT; i++)
                mux_dev->minor[i] = tty_dev->minor[i];

out:
        if (ret < 0) {
                kfree(tty_dev);
                if (mux_dev) {
                        release_usb(mux_dev);
                        kfree(mux_dev);
                }
                return ret;
        }

        mux_dev->intf = intf;
        mux_dev->usb_state = PM_NORMAL;

        /* publish the context and take the device reference only on success */
        usb_get_dev(usbdev);
        usb_set_intfdata(intf, tty_dev);

        return ret;
}

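/*
 * Disconnect: stop all rx activity, unregister the tty devices and drop the
 * device reference taken in probe.
 */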
static void gdm_mux_disconnect(struct usb_interface *intf)
{
        struct tty_dev *tty_dev;
        struct mux_dev *mux_dev;
        struct usb_device *usbdev = interface_to_usbdev(intf);

        tty_dev = usb_get_intfdata(intf);

        mux_dev = tty_dev->priv_dev;

        release_usb(mux_dev);
        unregister_lte_tty_device(tty_dev);

        kfree(mux_dev);
        kfree(tty_dev);

        usb_put_dev(usbdev);
}

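/*
 * Autosuspend entry point: mark the device suspended and kill any bulk-in
 * URBs that are still pending, dropping the submit-list lock around
 * usb_kill_urb() since it may sleep.
 */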
static int gdm_mux_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
        struct tty_dev *tty_dev;
        struct mux_dev *mux_dev;
        struct rx_cxt *rx;
        struct mux_rx *r, *r_next;
        unsigned long flags;

        tty_dev = usb_get_intfdata(intf);
        mux_dev = tty_dev->priv_dev;
        rx = &mux_dev->rx;

        if (mux_dev->usb_state != PM_NORMAL) {
                pr_err("usb suspend - invalid state\n");
                return -1;
        }

        mux_dev->usb_state = PM_SUSPEND;

        spin_lock_irqsave(&rx->submit_list_lock, flags);
        list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
                                 rx_submit_list) {
                spin_unlock_irqrestore(&rx->submit_list_lock, flags);
                usb_kill_urb(r->urb);
                spin_lock_irqsave(&rx->submit_list_lock, flags);
        }
        spin_unlock_irqrestore(&rx->submit_list_lock, flags);

        return 0;
}

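/*
 * Resume: restore the PM state and re-queue MAX_ISSUE_NUM bulk-in reads
 * using the receive callback remembered in mux_dev->rx_cb.
 */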
static int gdm_mux_resume(struct usb_interface *intf)
{
        struct tty_dev *tty_dev;
        struct mux_dev *mux_dev;
        u8 i;

        tty_dev = usb_get_intfdata(intf);
        mux_dev = tty_dev->priv_dev;

        if (mux_dev->usb_state != PM_SUSPEND) {
                pr_err("usb resume - invalid state\n");
                return -1;
        }

        mux_dev->usb_state = PM_NORMAL;

        for (i = 0; i < MAX_ISSUE_NUM; i++)
                gdm_mux_recv(mux_dev, mux_dev->rx_cb);

        return 0;
}

static struct usb_driver gdm_mux_driver = {
        .name = "gdm_mux",
        .probe = gdm_mux_probe,
        .disconnect = gdm_mux_disconnect,
        .id_table = id_table,
        .supports_autosuspend = 1,
        .suspend = gdm_mux_suspend,
        .resume = gdm_mux_resume,
        .reset_resume = gdm_mux_resume,
};

static int __init gdm_usb_mux_init(void)
{
        mux_rx_wq = create_workqueue("mux_rx_wq");
        if (mux_rx_wq == NULL) {
                pr_err("work queue create fail\n");
                return -ENOMEM;
        }

        register_lte_tty_driver();

        return usb_register(&gdm_mux_driver);
}


static void __exit gdm_usb_mux_exit(void)
{
        unregister_lte_tty_driver();

        if (mux_rx_wq) {
                flush_workqueue(mux_rx_wq);
                destroy_workqueue(mux_rx_wq);
        }

        usb_deregister(&gdm_mux_driver);
}

module_init(gdm_usb_mux_init);
module_exit(gdm_usb_mux_exit);

MODULE_DESCRIPTION("GCT LTE TTY Device Driver");
MODULE_LICENSE("GPL");