staging: gdm7240: fix error handling of probe()
drivers/staging/gdm724x/gdm_mux.c
/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/usb/cdc.h>

#include "gdm_mux.h"

struct workqueue_struct *mux_rx_wq;

static u16 packet_type[TTY_MAX_COUNT] = {0xF011, 0xF010};

#define USB_DEVICE_CDC_DATA(vid, pid) \
        .match_flags = \
                USB_DEVICE_ID_MATCH_DEVICE |\
                USB_DEVICE_ID_MATCH_INT_CLASS |\
                USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
        .idVendor = vid,\
        .idProduct = pid,\
        .bInterfaceClass = USB_CLASS_COMM,\
        .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM

static const struct usb_device_id id_table[] = {
        { USB_DEVICE_CDC_DATA(0x1076, 0x8000) }, /* GCT GDM7240 */
        { USB_DEVICE_CDC_DATA(0x1076, 0x8f00) }, /* GCT GDM7243 */
        { USB_DEVICE_CDC_DATA(0x1076, 0x9000) }, /* GCT GDM7243 */
        { USB_DEVICE_CDC_DATA(0x1d74, 0x2300) }, /* LGIT Phoenix */
        {}
};

MODULE_DEVICE_TABLE(usb, id_table);

int packet_type_to_index(u16 packetType)
{
        int i;

        for (i = 0; i < TTY_MAX_COUNT; i++) {
                if (packet_type[i] == packetType)
                        return i;
        }

        return -1;
}

static struct mux_tx *alloc_mux_tx(int len)
{
        struct mux_tx *t = NULL;

        t = kzalloc(sizeof(struct mux_tx), GFP_ATOMIC);
        if (!t)
                return NULL;

        t->urb = usb_alloc_urb(0, GFP_ATOMIC);
        t->buf = kmalloc(MUX_TX_MAX_SIZE, GFP_ATOMIC);
        if (!t->urb || !t->buf) {
                usb_free_urb(t->urb);
                kfree(t->buf);
                kfree(t);
                return NULL;
        }

        return t;
}

static void free_mux_tx(struct mux_tx *t)
{
        if (t) {
                usb_free_urb(t->urb);
                kfree(t->buf);
                kfree(t);
        }
}

static struct mux_rx *alloc_mux_rx(void)
{
        struct mux_rx *r = NULL;

        r = kzalloc(sizeof(struct mux_rx), GFP_ATOMIC);
        if (!r)
                return NULL;

        r->urb = usb_alloc_urb(0, GFP_ATOMIC);
        r->buf = kmalloc(MUX_RX_MAX_SIZE, GFP_ATOMIC);
        if (!r->urb || !r->buf) {
                usb_free_urb(r->urb);
                kfree(r->buf);
                kfree(r);
                return NULL;
        }

        return r;
}

static void free_mux_rx(struct mux_rx *r)
{
        if (r) {
                usb_free_urb(r->urb);
                kfree(r->buf);
                kfree(r);
        }
}

static struct mux_rx *get_rx_struct(struct rx_cxt *rx)
{
        struct mux_rx *r;
        unsigned long flags;

        spin_lock_irqsave(&rx->free_list_lock, flags);

        if (list_empty(&rx->rx_free_list)) {
                spin_unlock_irqrestore(&rx->free_list_lock, flags);
                return NULL;
        }

        r = list_entry(rx->rx_free_list.prev, struct mux_rx, free_list);
        list_del(&r->free_list);

        spin_unlock_irqrestore(&rx->free_list_lock, flags);

        return r;
}

static void put_rx_struct(struct rx_cxt *rx, struct mux_rx *r)
{
        unsigned long flags;

        spin_lock_irqsave(&rx->free_list_lock, flags);
        list_add_tail(&r->free_list, &rx->rx_free_list);
        spin_unlock_irqrestore(&rx->free_list_lock, flags);
}

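/*
 * Parse one completed bulk-in transfer that may carry several concatenated
 * MUX packets.  Each packet is a mux_pkt_header followed by its payload and
 * zero padding up to a 4-byte boundary; every payload is handed to the
 * matching TTY through r->callback().  On TO_HOST_BUFFER_REQUEST_FAIL the
 * current offset is remembered in r->offset.
 */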
static int up_to_host(struct mux_rx *r)
{
        struct mux_dev *mux_dev = (struct mux_dev *)r->mux_dev;
        struct mux_pkt_header *mux_header;
        unsigned int start_flag;
        unsigned int payload_size;
        unsigned short packet_type;
        int remain;
        int dummy_cnt;
        u32 packet_size_sum = r->offset;
        int index;
        int ret = TO_HOST_INVALID_PACKET;
        int len = r->len;

        while (1) {
                mux_header = (struct mux_pkt_header *)(r->buf + packet_size_sum);
                start_flag = __le32_to_cpu(mux_header->start_flag);
                payload_size = __le32_to_cpu(mux_header->payload_size);
                packet_type = __le16_to_cpu(mux_header->packet_type);

                if (start_flag != START_FLAG) {
                        pr_err("invalid START_FLAG %x\n", start_flag);
                        break;
                }

                remain = (MUX_HEADER_SIZE + payload_size) % 4;
                dummy_cnt = remain ? (4 - remain) : 0;

                if (len - packet_size_sum <
                    MUX_HEADER_SIZE + payload_size + dummy_cnt) {
                        pr_err("invalid payload : %d %d %04x\n",
                               payload_size, len, packet_type);
                        break;
                }

                index = packet_type_to_index(packet_type);
                if (index < 0) {
                        pr_err("invalid index %d\n", index);
                        break;
                }

                ret = r->callback(mux_header->data,
                                  payload_size,
                                  index,
                                  mux_dev->tty_dev,
                                  RECV_PACKET_PROCESS_CONTINUE);
                if (ret == TO_HOST_BUFFER_REQUEST_FAIL) {
                        r->offset += packet_size_sum;
                        break;
                }

                packet_size_sum += MUX_HEADER_SIZE + payload_size + dummy_cnt;
                if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
                        ret = r->callback(NULL,
                                          0,
                                          index,
                                          mux_dev->tty_dev,
                                          RECV_PACKET_PROCESS_COMPLETE);
                        break;
                }
        }

        return ret;
}

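/*
 * rx work item: drain to_host_list and pass each completed transfer to
 * up_to_host().  The buffer is recycled to the free list unless the TTY
 * layer could not take the data.
 */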
static void do_rx(struct work_struct *work)
{
        struct mux_dev *mux_dev =
                container_of(work, struct mux_dev, work_rx.work);
        struct mux_rx *r;
        struct rx_cxt *rx = (struct rx_cxt *)&mux_dev->rx;
        unsigned long flags;
        int ret = 0;

        while (1) {
                spin_lock_irqsave(&rx->to_host_lock, flags);
                if (list_empty(&rx->to_host_list)) {
                        spin_unlock_irqrestore(&rx->to_host_lock, flags);
                        break;
                }
                r = list_entry(rx->to_host_list.next, struct mux_rx, to_host_list);
                list_del(&r->to_host_list);
                spin_unlock_irqrestore(&rx->to_host_lock, flags);

                ret = up_to_host(r);
                if (ret == TO_HOST_BUFFER_REQUEST_FAIL)
                        pr_err("failed to send mux data to host\n");
                else
                        put_rx_struct(rx, r);
        }
}

static void remove_rx_submit_list(struct mux_rx *r, struct rx_cxt *rx)
{
        unsigned long flags;
        struct mux_rx *r_remove, *r_remove_next;

        spin_lock_irqsave(&rx->submit_list_lock, flags);
        list_for_each_entry_safe(r_remove, r_remove_next,
                                 &rx->rx_submit_list, rx_submit_list) {
                if (r == r_remove)
                        list_del(&r->rx_submit_list);
        }
        spin_unlock_irqrestore(&rx->submit_list_lock, flags);
}

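/*
 * Bulk-in completion handler: drop the urb from the submit list, then
 * either return the buffer to the free list on error, or queue it on
 * to_host_list and kick the rx work.
 */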
static void gdm_mux_rcv_complete(struct urb *urb)
{
        struct mux_rx *r = urb->context;
        struct mux_dev *mux_dev = (struct mux_dev *)r->mux_dev;
        struct rx_cxt *rx = &mux_dev->rx;
        unsigned long flags;

        remove_rx_submit_list(r, rx);

        if (urb->status) {
                if (mux_dev->usb_state == PM_NORMAL)
                        pr_err("%s: urb status error %d\n",
                               __func__, urb->status);
                put_rx_struct(rx, r);
        } else {
                r->len = r->urb->actual_length;
                spin_lock_irqsave(&rx->to_host_lock, flags);
                list_add_tail(&r->to_host_list, &rx->to_host_list);
                queue_work(mux_rx_wq, &mux_dev->work_rx.work);
                spin_unlock_irqrestore(&rx->to_host_lock, flags);
        }
}

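/*
 * Take a free mux_rx, track it on rx_submit_list and submit a bulk-in urb
 * on endpoint 0x86.  cb() is invoked from the rx work once data arrives.
 */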
static int gdm_mux_recv(void *priv_dev,
                        int (*cb)(void *data, int len, int tty_index,
                                  struct tty_dev *tty_dev, int complete))
{
        struct mux_dev *mux_dev = priv_dev;
        struct usb_device *usbdev = mux_dev->usbdev;
        struct mux_rx *r;
        struct rx_cxt *rx = &mux_dev->rx;
        unsigned long flags;
        int ret;

        if (!usbdev) {
                pr_err("device is disconnected\n");
                return -ENODEV;
        }

        r = get_rx_struct(rx);
        if (!r) {
                pr_err("get_rx_struct fail\n");
                return -ENOMEM;
        }

        r->offset = 0;
        r->mux_dev = (void *)mux_dev;
        r->callback = cb;
        mux_dev->rx_cb = cb;

        usb_fill_bulk_urb(r->urb,
                          usbdev,
                          usb_rcvbulkpipe(usbdev, 0x86),
                          r->buf,
                          MUX_RX_MAX_SIZE,
                          gdm_mux_rcv_complete,
                          r);

        spin_lock_irqsave(&rx->submit_list_lock, flags);
        list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
        spin_unlock_irqrestore(&rx->submit_list_lock, flags);

        ret = usb_submit_urb(r->urb, GFP_KERNEL);

        if (ret) {
                spin_lock_irqsave(&rx->submit_list_lock, flags);
                list_del(&r->rx_submit_list);
                spin_unlock_irqrestore(&rx->submit_list_lock, flags);

                put_rx_struct(rx, r);

                pr_err("usb_submit_urb ret=%d\n", ret);
        }

        usb_mark_last_busy(usbdev);

        return ret;
}

static void gdm_mux_send_complete(struct urb *urb)
{
        struct mux_tx *t = urb->context;

        if (urb->status == -ECONNRESET) {
                pr_info("CONNRESET\n");
                free_mux_tx(t);
                return;
        }

        if (t->callback)
                t->callback(t->cb_data);

        free_mux_tx(t);
}

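/*
 * Frame one TTY write: prepend a mux_pkt_header, zero-pad the packet to a
 * 4-byte boundary and submit it as a bulk-out urb on endpoint 5.  The
 * allocation and submission run under write_lock, hence GFP_ATOMIC.
 */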
static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
                        void (*cb)(void *data), void *cb_data)
{
        struct mux_dev *mux_dev = priv_dev;
        struct usb_device *usbdev = mux_dev->usbdev;
        struct mux_pkt_header *mux_header;
        struct mux_tx *t = NULL;
        static u32 seq_num = 1;
        int remain;
        int dummy_cnt;
        int total_len;
        int ret;
        unsigned long flags;

        if (mux_dev->usb_state == PM_SUSPEND) {
                ret = usb_autopm_get_interface(mux_dev->intf);
                if (!ret)
                        usb_autopm_put_interface(mux_dev->intf);
        }

        spin_lock_irqsave(&mux_dev->write_lock, flags);

        remain = (MUX_HEADER_SIZE + len) % 4;
        dummy_cnt = remain ? (4 - remain) : 0;

        total_len = len + MUX_HEADER_SIZE + dummy_cnt;

        t = alloc_mux_tx(total_len);
        if (!t) {
                pr_err("alloc_mux_tx fail\n");
                spin_unlock_irqrestore(&mux_dev->write_lock, flags);
                return -ENOMEM;
        }

        mux_header = (struct mux_pkt_header *)t->buf;
        mux_header->start_flag = __cpu_to_le32(START_FLAG);
        mux_header->seq_num = __cpu_to_le32(seq_num++);
        mux_header->payload_size = __cpu_to_le32((u32)len);
        mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]);

        memcpy(t->buf + MUX_HEADER_SIZE, data, len);
        memset(t->buf + MUX_HEADER_SIZE + len, 0, dummy_cnt);

        t->len = total_len;
        t->callback = cb;
        t->cb_data = cb_data;

        usb_fill_bulk_urb(t->urb,
                          usbdev,
                          usb_sndbulkpipe(usbdev, 5),
                          t->buf,
                          total_len,
                          gdm_mux_send_complete,
                          t);

        ret = usb_submit_urb(t->urb, GFP_ATOMIC);

        spin_unlock_irqrestore(&mux_dev->write_lock, flags);

        if (ret)
                pr_err("usb_submit_urb Error: %d\n", ret);

        usb_mark_last_busy(usbdev);

        return ret;
}

static int gdm_mux_send_control(void *priv_dev, int request, int value, void *buf, int len)
{
        struct mux_dev *mux_dev = priv_dev;
        struct usb_device *usbdev = mux_dev->usbdev;
        int ret;

        ret = usb_control_msg(usbdev,
                              usb_sndctrlpipe(usbdev, 0),
                              request,
                              USB_RT_ACM,
                              value,
                              2,
                              buf,
                              len,
                              5000);

        if (ret < 0)
                pr_err("usb_control_msg error: %d\n", ret);

        return ret < 0 ? ret : 0;
}

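/*
 * Kill outstanding rx urbs and free every queued mux_rx.  The submit list
 * lock is dropped around usb_kill_urb() because the completion handler
 * takes the same lock via remove_rx_submit_list().
 */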
static void release_usb(struct mux_dev *mux_dev)
{
        struct rx_cxt *rx = &mux_dev->rx;
        struct mux_rx *r, *r_next;
        unsigned long flags;

        cancel_delayed_work(&mux_dev->work_rx);

        spin_lock_irqsave(&rx->submit_list_lock, flags);
        list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list) {
                spin_unlock_irqrestore(&rx->submit_list_lock, flags);
                usb_kill_urb(r->urb);
                spin_lock_irqsave(&rx->submit_list_lock, flags);
        }
        spin_unlock_irqrestore(&rx->submit_list_lock, flags);

        spin_lock_irqsave(&rx->free_list_lock, flags);
        list_for_each_entry_safe(r, r_next, &rx->rx_free_list, free_list) {
                list_del(&r->free_list);
                free_mux_rx(r);
        }
        spin_unlock_irqrestore(&rx->free_list_lock, flags);

        spin_lock_irqsave(&rx->to_host_lock, flags);
        list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
                if (r->mux_dev == (void *)mux_dev) {
                        list_del(&r->to_host_list);
                        free_mux_rx(r);
                }
        }
        spin_unlock_irqrestore(&rx->to_host_lock, flags);
}

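/*
 * Initialize the locks, lists and rx work of the mux path and pre-allocate
 * MAX_ISSUE_NUM * 2 rx descriptors onto the free list.
 */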
static int init_usb(struct mux_dev *mux_dev)
{
        struct mux_rx *r;
        struct rx_cxt *rx = &mux_dev->rx;
        int ret = 0;
        int i;

        spin_lock_init(&mux_dev->write_lock);
        INIT_LIST_HEAD(&rx->to_host_list);
        INIT_LIST_HEAD(&rx->rx_submit_list);
        INIT_LIST_HEAD(&rx->rx_free_list);
        spin_lock_init(&rx->to_host_lock);
        spin_lock_init(&rx->submit_list_lock);
        spin_lock_init(&rx->free_list_lock);

        for (i = 0; i < MAX_ISSUE_NUM * 2; i++) {
                r = alloc_mux_rx();
                if (r == NULL) {
                        ret = -ENOMEM;
                        break;
                }

                list_add(&r->free_list, &rx->rx_free_list);
        }

        INIT_DELAYED_WORK(&mux_dev->work_rx, do_rx);

        return ret;
}

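/*
 * Bind only to interface 2 of the modem.  Failures unwind the earlier
 * allocations through the err_* labels at the end of the function.
 */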
static int gdm_mux_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
        struct mux_dev *mux_dev;
        struct tty_dev *tty_dev;
        u16 idVendor, idProduct;
        int bInterfaceNumber;
        int ret;
        int i;
        struct usb_device *usbdev = interface_to_usbdev(intf);

        bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;

        idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
        idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

        pr_info("mux vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

        if (bInterfaceNumber != 2)
                return -ENODEV;

        mux_dev = kzalloc(sizeof(struct mux_dev), GFP_KERNEL);
        if (!mux_dev)
                return -ENOMEM;

        tty_dev = kzalloc(sizeof(struct tty_dev), GFP_KERNEL);
        if (!tty_dev) {
                ret = -ENOMEM;
                goto err_free_mux;
        }

        mux_dev->usbdev = usbdev;
        mux_dev->control_intf = intf;

        ret = init_usb(mux_dev);
        if (ret)
                goto err_free_tty;

        tty_dev->priv_dev = (void *)mux_dev;
        tty_dev->send_func = gdm_mux_send;
        tty_dev->recv_func = gdm_mux_recv;
        tty_dev->send_control = gdm_mux_send_control;

        ret = register_lte_tty_device(tty_dev, &intf->dev);
        if (ret)
                goto err_unregister_tty;

        for (i = 0; i < TTY_MAX_COUNT; i++)
                mux_dev->tty_dev = tty_dev;

        mux_dev->intf = intf;
        mux_dev->usb_state = PM_NORMAL;

        usb_get_dev(usbdev);
        usb_set_intfdata(intf, tty_dev);

        return 0;

err_unregister_tty:
        unregister_lte_tty_device(tty_dev);
        release_usb(mux_dev);
err_free_tty:
        kfree(tty_dev);
err_free_mux:
        kfree(mux_dev);

        return ret;
}

static void gdm_mux_disconnect(struct usb_interface *intf)
{
        struct tty_dev *tty_dev;
        struct mux_dev *mux_dev;
        struct usb_device *usbdev = interface_to_usbdev(intf);

        tty_dev = usb_get_intfdata(intf);

        mux_dev = tty_dev->priv_dev;

        release_usb(mux_dev);
        unregister_lte_tty_device(tty_dev);

        kfree(mux_dev);
        kfree(tty_dev);

        usb_put_dev(usbdev);
}

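/*
 * Suspend kills every in-flight rx urb; resume restores PM_NORMAL and
 * re-submits MAX_ISSUE_NUM rx urbs via gdm_mux_recv() using the callback
 * saved in rx_cb.
 */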
static int gdm_mux_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
        struct tty_dev *tty_dev;
        struct mux_dev *mux_dev;
        struct rx_cxt *rx;
        struct mux_rx *r, *r_next;
        unsigned long flags;

        tty_dev = usb_get_intfdata(intf);
        mux_dev = tty_dev->priv_dev;
        rx = &mux_dev->rx;

        if (mux_dev->usb_state != PM_NORMAL) {
                pr_err("usb suspend - invalid state\n");
                return -1;
        }

        mux_dev->usb_state = PM_SUSPEND;

        spin_lock_irqsave(&rx->submit_list_lock, flags);
        list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list) {
                spin_unlock_irqrestore(&rx->submit_list_lock, flags);
                usb_kill_urb(r->urb);
                spin_lock_irqsave(&rx->submit_list_lock, flags);
        }
        spin_unlock_irqrestore(&rx->submit_list_lock, flags);

        return 0;
}

static int gdm_mux_resume(struct usb_interface *intf)
{
        struct tty_dev *tty_dev;
        struct mux_dev *mux_dev;
        u8 i;

        tty_dev = usb_get_intfdata(intf);
        mux_dev = tty_dev->priv_dev;

        if (mux_dev->usb_state != PM_SUSPEND) {
                pr_err("usb resume - invalid state\n");
                return -1;
        }

        mux_dev->usb_state = PM_NORMAL;

        for (i = 0; i < MAX_ISSUE_NUM; i++)
                gdm_mux_recv(mux_dev, mux_dev->rx_cb);

        return 0;
}

static struct usb_driver gdm_mux_driver = {
        .name = "gdm_mux",
        .probe = gdm_mux_probe,
        .disconnect = gdm_mux_disconnect,
        .id_table = id_table,
        .supports_autosuspend = 1,
        .suspend = gdm_mux_suspend,
        .resume = gdm_mux_resume,
        .reset_resume = gdm_mux_resume,
};

static int __init gdm_usb_mux_init(void)
{
        mux_rx_wq = create_workqueue("mux_rx_wq");
        if (mux_rx_wq == NULL) {
                pr_err("work queue create fail\n");
                return -1;
        }

        register_lte_tty_driver();

        return usb_register(&gdm_mux_driver);
}

static void __exit gdm_usb_mux_exit(void)
{
        unregister_lte_tty_driver();

        if (mux_rx_wq) {
                flush_workqueue(mux_rx_wq);
                destroy_workqueue(mux_rx_wq);
        }

        usb_deregister(&gdm_mux_driver);
}

module_init(gdm_usb_mux_init);
module_exit(gdm_usb_mux_exit);

MODULE_DESCRIPTION("GCT LTE TTY Device Driver");
MODULE_LICENSE("GPL");