staging: gdm724x: fix leak at failure path in gdm_usb_probe()
drivers/staging/gdm724x/gdm_usb.c
/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/usb/cdc.h>
#include <linux/wait.h>
#include <linux/if_ether.h>
#include <linux/pm_runtime.h>

#include "gdm_usb.h"
#include "gdm_lte.h"
#include "hci.h"
#include "hci_packet.h"
#include "gdm_endian.h"

#define USB_DEVICE_CDC_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceClass = USB_CLASS_COMM,\
	.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET

#define USB_DEVICE_MASS_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceSubClass = USB_SC_SCSI, \
	.bInterfaceClass = USB_CLASS_MASS_STORAGE,\
	.bInterfaceProtocol = USB_PR_BULK

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7240) }, /* GCT GDM7240 */
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7243) }, /* GCT GDM7243 */
	{ }
};

MODULE_DEVICE_TABLE(usb, id_table);

static struct workqueue_struct *usb_tx_wq;
static struct workqueue_struct *usb_rx_wq;

static void do_tx(struct work_struct *work);
static void do_rx(struct work_struct *work);

static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data, void *data, int len, int context),
			void *cb_data,
			int context);

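/*
 * Ask the modem for its MAC address: build an HCI LTE_GET_INFORMATION
 * request and send it over bulk-out endpoint 2. The reply arrives later
 * as an LTE_GET_INFORMATION_RESULT event handled in do_rx().
 */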
static int request_mac_address(struct lte_udev *udev)
{
	u8 buf[16] = {0,};
	struct hci_packet *hci = (struct hci_packet *)buf;
	struct usb_device *usbdev = udev->usbdev;
	int actual;
	int ret = -1;

	hci->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_GET_INFORMATION);
	hci->len = gdm_cpu_to_dev16(&udev->gdm_ed, 1);
	hci->data[0] = MAC_ADDRESS;

	ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5,
			   &actual, 1000);

	udev->request_mac_addr = 1;

	return ret;
}

static struct usb_tx *alloc_tx_struct(int len)
{
	struct usb_tx *t = NULL;
	int ret = 0;

	t = kzalloc(sizeof(struct usb_tx), GFP_ATOMIC);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}

	t->urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!(len % 512))
		len++;

	t->buf = kmalloc(len, GFP_ATOMIC);
	if (!t->urb || !t->buf) {
		ret = -ENOMEM;
		goto out;
	}

out:
	if (ret < 0) {
		if (t) {
			usb_free_urb(t->urb);
			kfree(t->buf);
			kfree(t);
		}
		return NULL;
	}

	return t;
}

static struct usb_tx_sdu *alloc_tx_sdu_struct(void)
{
	struct usb_tx_sdu *t_sdu = NULL;
	int ret = 0;

	t_sdu = kzalloc(sizeof(struct usb_tx_sdu), GFP_ATOMIC);
	if (!t_sdu) {
		ret = -ENOMEM;
		goto out;
	}

	t_sdu->buf = kmalloc(SDU_BUF_SIZE, GFP_ATOMIC);
	if (!t_sdu->buf) {
		ret = -ENOMEM;
		goto out;
	}
out:

	if (ret < 0) {
		if (t_sdu) {
			kfree(t_sdu->buf);
			kfree(t_sdu);
		}
		return NULL;
	}

	return t_sdu;
}

static void free_tx_struct(struct usb_tx *t)
{
	if (t) {
		usb_free_urb(t->urb);
		kfree(t->buf);
		kfree(t);
	}
}

static void free_tx_sdu_struct(struct usb_tx_sdu *t_sdu)
{
	if (t_sdu) {
		kfree(t_sdu->buf);
		kfree(t_sdu);
	}
}

static struct usb_tx_sdu *get_tx_sdu_struct(struct tx_cxt *tx, int *no_spc)
{
	struct usb_tx_sdu *t_sdu;

	if (list_empty(&tx->free_list))
		return NULL;

	t_sdu = list_entry(tx->free_list.next, struct usb_tx_sdu, list);
	list_del(&t_sdu->list);

	tx->avail_count--;

	*no_spc = list_empty(&tx->free_list) ? 1 : 0;

	return t_sdu;
}

static void put_tx_struct(struct tx_cxt *tx, struct usb_tx_sdu *t_sdu)
{
	list_add_tail(&t_sdu->list, &tx->free_list);
	tx->avail_count++;
}

static struct usb_rx *alloc_rx_struct(void)
{
	struct usb_rx *r = NULL;
	int ret = 0;

	r = kmalloc(sizeof(struct usb_rx), GFP_ATOMIC);
	if (!r) {
		ret = -ENOMEM;
		goto out;
	}

	r->urb = usb_alloc_urb(0, GFP_ATOMIC);
	r->buf = kmalloc(RX_BUF_SIZE, GFP_ATOMIC);
	if (!r->urb || !r->buf) {
		ret = -ENOMEM;
		goto out;
	}
out:

	if (ret < 0) {
		if (r) {
			usb_free_urb(r->urb);
			kfree(r->buf);
			kfree(r);
		}
		return NULL;
	}

	return r;
}

static void free_rx_struct(struct usb_rx *r)
{
	if (r) {
		usb_free_urb(r->urb);
		kfree(r->buf);
		kfree(r);
	}
}

static struct usb_rx *get_rx_struct(struct rx_cxt *rx, int *no_spc)
{
	struct usb_rx *r;
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	if (list_empty(&rx->free_list)) {
		spin_unlock_irqrestore(&rx->rx_lock, flags);
		return NULL;
	}

	r = list_entry(rx->free_list.next, struct usb_rx, free_list);
	list_del(&r->free_list);

	rx->avail_count--;

	*no_spc = list_empty(&rx->free_list) ? 1 : 0;

	spin_unlock_irqrestore(&rx->rx_lock, flags);

	return r;
}

static void put_rx_struct(struct rx_cxt *rx, struct usb_rx *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	list_add_tail(&r->free_list, &rx->free_list);
	rx->avail_count++;

	spin_unlock_irqrestore(&rx->rx_lock, flags);
}

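/*
 * Drop everything the driver still owns: pending and free tx SDUs, queued
 * HCI packets, in-flight rx URBs (killed with the submit lock released)
 * and the rx free/to-host lists. Called on probe failure and disconnect.
 */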
static void release_usb(struct lte_udev *udev)
{
	struct rx_cxt *rx = &udev->rx;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t, *t_next;
	struct usb_rx *r, *r_next;
	struct usb_tx_sdu *t_sdu, *t_sdu_next;
	unsigned long flags;

	spin_lock_irqsave(&tx->lock, flags);
	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->sdu_list, list)
	{
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}

	list_for_each_entry_safe(t, t_next, &tx->hci_list, list)
	{
		list_del(&t->list);
		free_tx_struct(t);
	}

	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->free_list, list)
	{
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list)
	{
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	spin_lock_irqsave(&rx->rx_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->free_list, free_list)
	{
		list_del(&r->free_list);
		free_rx_struct(r);
	}
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	spin_lock_irqsave(&rx->to_host_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list)
	{
		if (r->index == (void *)udev) {
			list_del(&r->to_host_list);
			free_rx_struct(r);
		}
	}
	spin_unlock_irqrestore(&rx->to_host_lock, flags);
}

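/*
 * Set up the per-device tx/rx state: list heads, locks and the
 * preallocated pools (MAX_NUM_SDU_BUF tx SDUs and MAX_RX_SUBMIT_COUNT * 2
 * rx buffers). Returns -ENOMEM if any pool allocation fails.
 */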
static int init_usb(struct lte_udev *udev)
{
	int ret = 0;
	int i;
	struct tx_cxt *tx = &udev->tx;
	struct rx_cxt *rx = &udev->rx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct usb_rx *r = NULL;

	udev->send_complete = 1;
	udev->tx_stop = 0;
	udev->request_mac_addr = 0;
	udev->usb_state = PM_NORMAL;

	INIT_LIST_HEAD(&tx->sdu_list);
	INIT_LIST_HEAD(&tx->hci_list);
	INIT_LIST_HEAD(&tx->free_list);
	INIT_LIST_HEAD(&rx->rx_submit_list);
	INIT_LIST_HEAD(&rx->free_list);
	INIT_LIST_HEAD(&rx->to_host_list);
	spin_lock_init(&tx->lock);
	spin_lock_init(&rx->rx_lock);
	spin_lock_init(&rx->submit_lock);
	spin_lock_init(&rx->to_host_lock);

	tx->avail_count = 0;
	rx->avail_count = 0;

	udev->rx_cb = NULL;

	for (i = 0; i < MAX_NUM_SDU_BUF; i++) {
		t_sdu = alloc_tx_sdu_struct();
		if (t_sdu == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&t_sdu->list, &tx->free_list);
		tx->avail_count++;
	}

	for (i = 0; i < MAX_RX_SUBMIT_COUNT*2; i++) {
		r = alloc_rx_struct();
		if (r == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&r->free_list, &rx->free_list);
		rx->avail_count++;
	}
	INIT_DELAYED_WORK(&udev->work_tx, do_tx);
	INIT_DELAYED_WORK(&udev->work_rx, do_rx);
	return 0;
fail:
	return ret;
}

static int set_mac_address(u8 *data, void *arg)
{
	struct phy_dev *phy_dev = (struct phy_dev *)arg;
	struct lte_udev *udev = phy_dev->priv_dev;
	struct tlv *tlv = (struct tlv *)data;
	u8 mac_address[ETH_ALEN] = {0, };

	if (tlv->type == MAC_ADDRESS && udev->request_mac_addr) {
		memcpy(mac_address, tlv->data, tlv->len);

		if (register_lte_device(phy_dev, &udev->intf->dev, mac_address) < 0)
			pr_err("register lte device failed\n");

		udev->request_mac_addr = 0;

		return 1;
	}

	return 0;
}

static void do_rx(struct work_struct *work)
{
	struct lte_udev *udev = container_of(work, struct lte_udev, work_rx.work);
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	struct hci_packet *hci;
	struct phy_dev *phy_dev;
	u16 cmd_evt;
	int ret;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		if (list_empty(&rx->to_host_list)) {
			spin_unlock_irqrestore(&rx->to_host_lock, flags);
			break;
		}
		r = list_entry(rx->to_host_list.next, struct usb_rx, to_host_list);
		list_del(&r->to_host_list);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);

		phy_dev = (struct phy_dev *)r->cb_data;
		udev = (struct lte_udev *)phy_dev->priv_dev;
		hci = (struct hci_packet *)r->buf;
		cmd_evt = gdm_dev16_to_cpu(&udev->gdm_ed, hci->cmd_evt);

		switch (cmd_evt) {
		case LTE_GET_INFORMATION_RESULT:
			if (set_mac_address(hci->data, r->cb_data) == 0) {
				ret = r->callback(r->cb_data,
						  r->buf,
						  r->urb->actual_length,
						  KERNEL_THREAD);
			}
			break;

		default:
			if (r->callback) {
				ret = r->callback(r->cb_data,
						  r->buf,
						  r->urb->actual_length,
						  KERNEL_THREAD);

				if (ret == -EAGAIN)
					pr_err("failed to send received data\n");
			}
			break;
		}

		put_rx_struct(rx, r);

		gdm_usb_recv(udev,
			     r->callback,
			     r->cb_data,
			     USB_COMPLETE);
	}
}

static void remove_rx_submit_list(struct usb_rx *r, struct rx_cxt *rx)
{
	unsigned long flags;
	struct usb_rx *r_remove, *r_remove_next;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list, rx_submit_list)
	{
		if (r == r_remove) {
			list_del(&r->rx_submit_list);
			break;
		}
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);
}

static void gdm_usb_rcv_complete(struct urb *urb)
{
	struct usb_rx *r = urb->context;
	struct rx_cxt *rx = r->rx;
	unsigned long flags;
	struct lte_udev *udev = container_of(r->rx, struct lte_udev, rx);
	struct usb_device *usbdev = udev->usbdev;

	remove_rx_submit_list(r, rx);

	if (!urb->status && r->callback) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		list_add_tail(&r->to_host_list, &rx->to_host_list);
		queue_work(usb_rx_wq, &udev->work_rx.work);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);
	} else {
		if (urb->status && udev->usb_state == PM_NORMAL)
			pr_err("%s: urb status error %d\n",
			       __func__, urb->status);

		put_rx_struct(rx, r);
	}

	usb_mark_last_busy(usbdev);
}

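/*
 * Take a buffer from the rx free pool, fill a bulk URB for bulk-in
 * endpoint 0x83 and submit it. The completion handler either hands the
 * packet to do_rx() via the to-host list or returns the buffer to the pool.
 */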
static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data, void *data, int len, int context),
			void *cb_data,
			int context)
{
	struct lte_udev *udev = priv_dev;
	struct usb_device *usbdev = udev->usbdev;
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	int no_spc;
	int ret;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("invalid device\n");
		return -ENODEV;
	}

	r = get_rx_struct(rx, &no_spc);
	if (!r) {
		pr_err("Out of Memory\n");
		return -ENOMEM;
	}

	udev->rx_cb = cb;
	r->callback = cb;
	r->cb_data = cb_data;
	r->index = (void *)udev;
	r->rx = rx;

	usb_fill_bulk_urb(r->urb,
			  usbdev,
			  usb_rcvbulkpipe(usbdev, 0x83),
			  r->buf,
			  RX_BUF_SIZE,
			  gdm_usb_rcv_complete,
			  r);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	if (context == KERNEL_THREAD)
		ret = usb_submit_urb(r->urb, GFP_KERNEL);
	else
		ret = usb_submit_urb(r->urb, GFP_ATOMIC);

	if (ret) {
		spin_lock_irqsave(&rx->submit_lock, flags);
		list_del(&r->rx_submit_list);
		spin_unlock_irqrestore(&rx->submit_lock, flags);

		pr_err("usb_submit_urb failed (%p)\n", r);
		put_rx_struct(rx, r);
	}

	return ret;
}

static void gdm_usb_send_complete(struct urb *urb)
{
	struct usb_tx *t = urb->context;
	struct tx_cxt *tx = t->tx;
	struct lte_udev *udev = container_of(tx, struct lte_udev, tx);
	unsigned long flags;

	if (urb->status == -ECONNRESET) {
		pr_info("CONNRESET\n");
		return;
	}

	if (t->callback)
		t->callback(t->cb_data);

	free_tx_struct(t);

	spin_lock_irqsave(&tx->lock, flags);
	udev->send_complete = 1;
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);
}

static int send_tx_packet(struct usb_device *usbdev, struct usb_tx *t, u32 len)
{
	int ret = 0;

	if (!(len%512))
		len++;

	usb_fill_bulk_urb(t->urb,
			  usbdev,
			  usb_sndbulkpipe(usbdev, 2),
			  t->buf,
			  len,
			  gdm_usb_send_complete,
			  t);

	ret = usb_submit_urb(t->urb, GFP_ATOMIC);

	if (ret)
		pr_err("usb_submit_urb failed: %d\n", ret);

	usb_mark_last_busy(usbdev);

	return ret;
}

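/*
 * Pull as many queued SDUs as fit (up to MAX_PACKET_IN_MULTI_SDU and
 * MAX_SDU_SIZE bytes) into one LTE_TX_MULTI_SDU frame, padding each SDU
 * to a 4-byte boundary, and return the total length to transmit.
 */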
static u32 packet_aggregation(struct lte_udev *udev, u8 *send_buf)
{
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct multi_sdu *multi_sdu = (struct multi_sdu *)send_buf;
	u16 send_len = 0;
	u16 num_packet = 0;
	unsigned long flags;

	multi_sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_MULTI_SDU);

	while (num_packet < MAX_PACKET_IN_MULTI_SDU) {
		spin_lock_irqsave(&tx->lock, flags);
		if (list_empty(&tx->sdu_list)) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		t_sdu = list_entry(tx->sdu_list.next, struct usb_tx_sdu, list);
		if (send_len + t_sdu->len > MAX_SDU_SIZE) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		list_del(&t_sdu->list);
		spin_unlock_irqrestore(&tx->lock, flags);

		memcpy(multi_sdu->data + send_len, t_sdu->buf, t_sdu->len);

		send_len += (t_sdu->len + 3) & 0xfffc;
		num_packet++;

		if (tx->avail_count > 10)
			t_sdu->callback(t_sdu->cb_data);

		spin_lock_irqsave(&tx->lock, flags);
		put_tx_struct(tx, t_sdu);
		spin_unlock_irqrestore(&tx->lock, flags);
	}

	multi_sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
	multi_sdu->num_packet = gdm_cpu_to_dev16(&udev->gdm_ed, num_packet);

	return send_len + offsetof(struct multi_sdu, data);
}

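/*
 * Tx worker: sends at most one URB per run, guarded by send_complete.
 * Pending HCI packets take priority; otherwise queued SDUs are aggregated
 * into a freshly allocated usb_tx buffer and pushed out.
 */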
static void do_tx(struct work_struct *work)
{
	struct lte_udev *udev = container_of(work, struct lte_udev, work_tx.work);
	struct usb_device *usbdev = udev->usbdev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t = NULL;
	int is_send = 0;
	u32 len = 0;
	unsigned long flags;

	if (!usb_autopm_get_interface(udev->intf))
		usb_autopm_put_interface(udev->intf);

	if (udev->usb_state == PM_SUSPEND)
		return;

	spin_lock_irqsave(&tx->lock, flags);
	if (!udev->send_complete) {
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	} else {
		udev->send_complete = 0;
	}

	if (!list_empty(&tx->hci_list)) {
		t = list_entry(tx->hci_list.next, struct usb_tx, list);
		list_del(&t->list);
		len = t->len;
		t->is_sdu = 0;
		is_send = 1;
	} else if (!list_empty(&tx->sdu_list)) {
		if (udev->tx_stop) {
			udev->send_complete = 1;
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}

		t = alloc_tx_struct(TX_BUF_SIZE);
		t->callback = NULL;
		t->tx = tx;
		t->is_sdu = 1;
		is_send = 1;
	}

	if (!is_send) {
		udev->send_complete = 1;
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	if (t->is_sdu)
		len = packet_aggregation(udev, t->buf);

	if (send_tx_packet(usbdev, t, len)) {
		pr_err("send_tx_packet failed\n");
		t->callback = NULL;
		gdm_usb_send_complete(t->urb);
	}
}

#define SDU_PARAM_LEN 12
static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
			    unsigned int dftEpsId, unsigned int epsId,
			    void (*cb)(void *data), void *cb_data,
			    int dev_idx, int nic_type)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu;
	struct sdu *sdu = NULL;
	unsigned long flags;
	int no_spc = 0;
	u16 send_len;

	if (!udev->usbdev) {
		pr_err("sdu send - invalid device\n");
		return TX_NO_DEV;
	}

	spin_lock_irqsave(&tx->lock, flags);
	t_sdu = get_tx_sdu_struct(tx, &no_spc);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (t_sdu == NULL) {
		pr_err("sdu send - free list empty\n");
		return TX_NO_SPC;
	}

	sdu = (struct sdu *)t_sdu->buf;
	sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_SDU);
	if (nic_type == NIC_TYPE_ARP) {
		send_len = len + SDU_PARAM_LEN;
		memcpy(sdu->data, data, len);
	} else {
		send_len = len - ETH_HLEN;
		send_len += SDU_PARAM_LEN;
		memcpy(sdu->data, data+ETH_HLEN, len-ETH_HLEN);
	}

	sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
	sdu->dftEpsId = gdm_cpu_to_dev32(&udev->gdm_ed, dftEpsId);
	sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, epsId);
	sdu->nic_type = gdm_cpu_to_dev32(&udev->gdm_ed, nic_type);

	t_sdu->len = send_len + HCI_HEADER_SIZE;
	t_sdu->callback = cb;
	t_sdu->cb_data = cb_data;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t_sdu->list, &tx->sdu_list);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (no_spc)
		return TX_NO_BUFFER;

	return 0;
}

static int gdm_usb_hci_send(void *priv_dev, void *data, int len,
			    void (*cb)(void *data), void *cb_data)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("hci send - invalid device\n");
		return -ENODEV;
	}

	t = alloc_tx_struct(len);
	if (t == NULL) {
		pr_err("hci_send - out of memory\n");
		return -ENOMEM;
	}

	memcpy(t->buf, data, len);
	t->callback = cb;
	t->cb_data = cb_data;
	t->len = len;
	t->tx = tx;
	t->is_sdu = 0;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t->list, &tx->hci_list);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}

static struct gdm_endian *gdm_usb_get_endian(void *priv_dev)
{
	struct lte_udev *udev = priv_dev;

	return &udev->gdm_ed;
}

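/*
 * Probe: allocate phy_dev and lte_udev, initialize USB state, pick the
 * device endianness and request the MAC address. The error labels unwind
 * in reverse order of allocation (release_usb(), kfree(udev),
 * kfree(phy_dev)) so that nothing is leaked when probe fails.
 */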
static int gdm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	int ret = 0;
	struct phy_dev *phy_dev = NULL;
	struct lte_udev *udev = NULL;
	u16 idVendor, idProduct;
	int bInterfaceNumber;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	pr_info("net vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

	if (bInterfaceNumber > NETWORK_INTERFACE) {
		pr_info("not a network device\n");
		return -ENODEV;
	}

	phy_dev = kzalloc(sizeof(struct phy_dev), GFP_KERNEL);
	if (!phy_dev)
		return -ENOMEM;

	udev = kzalloc(sizeof(struct lte_udev), GFP_KERNEL);
	if (!udev) {
		ret = -ENOMEM;
		goto err_udev;
	}

	phy_dev->priv_dev = (void *)udev;
	phy_dev->send_hci_func = gdm_usb_hci_send;
	phy_dev->send_sdu_func = gdm_usb_sdu_send;
	phy_dev->rcv_func = gdm_usb_recv;
	phy_dev->get_endian = gdm_usb_get_endian;

	udev->usbdev = usbdev;
	ret = init_usb(udev);
	if (ret < 0) {
		pr_err("init_usb func failed\n");
		goto err_init_usb;
	}
	udev->intf = intf;

	intf->needs_remote_wakeup = 1;
	usb_enable_autosuspend(usbdev);
	pm_runtime_set_autosuspend_delay(&usbdev->dev, AUTO_SUSPEND_TIMER);

	/* List up hosts with big endians, otherwise, defaults to little endian */
	if (idProduct == PID_GDM7243)
		gdm_set_endian(&udev->gdm_ed, ENDIANNESS_BIG);
	else
		gdm_set_endian(&udev->gdm_ed, ENDIANNESS_LITTLE);

	ret = request_mac_address(udev);
	if (ret < 0) {
		pr_err("request Mac address failed\n");
		goto err_mac_address;
	}

	start_rx_proc(phy_dev);
	usb_get_dev(usbdev);
	usb_set_intfdata(intf, phy_dev);

	return 0;

err_mac_address:
	release_usb(udev);
err_init_usb:
	kfree(udev);
err_udev:
	kfree(phy_dev);

	return ret;
}

static void gdm_usb_disconnect(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	u16 idVendor, idProduct;
	struct usb_device *usbdev;
	usbdev = interface_to_usbdev(intf);

	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	phy_dev = usb_get_intfdata(intf);

	udev = phy_dev->priv_dev;
	unregister_lte_device(phy_dev);

	release_usb(udev);

	kfree(udev);
	udev = NULL;

	kfree(phy_dev);
	phy_dev = NULL;

	usb_put_dev(usbdev);
}

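/*
 * Power management: suspend kills any in-flight rx URBs; resume resubmits
 * rx URBs (up to MAX_RX_SUBMIT_COUNT in flight) and kicks the tx worker.
 */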
static int gdm_usb_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct rx_cxt *rx;
	struct usb_rx *r;
	struct usb_rx *r_next;
	unsigned long flags;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;
	if (udev->usb_state != PM_NORMAL) {
		pr_err("usb suspend - invalid state\n");
		return -1;
	}

	udev->usb_state = PM_SUSPEND;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list)
	{
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	return 0;
}

static int gdm_usb_resume(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct tx_cxt *tx;
	struct rx_cxt *rx;
	unsigned long flags;
	int issue_count;
	int i;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;

	if (udev->usb_state != PM_SUSPEND) {
		pr_err("usb resume - invalid state\n");
		return -1;
	}
	udev->usb_state = PM_NORMAL;

	spin_lock_irqsave(&rx->rx_lock, flags);
	issue_count = rx->avail_count - MAX_RX_SUBMIT_COUNT;
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	if (issue_count >= 0) {
		for (i = 0; i < issue_count; i++)
			gdm_usb_recv(phy_dev->priv_dev,
				     udev->rx_cb,
				     phy_dev,
				     USB_COMPLETE);
	}

	tx = &udev->tx;
	spin_lock_irqsave(&tx->lock, flags);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}

static struct usb_driver gdm_usb_lte_driver = {
	.name = "gdm_lte",
	.probe = gdm_usb_probe,
	.disconnect = gdm_usb_disconnect,
	.id_table = id_table,
	.supports_autosuspend = 1,
	.suspend = gdm_usb_suspend,
	.resume = gdm_usb_resume,
	.reset_resume = gdm_usb_resume,
};

static int __init gdm_usb_lte_init(void)
{
	if (gdm_lte_event_init() < 0) {
		pr_err("error creating event\n");
		return -1;
	}

	usb_tx_wq = create_workqueue("usb_tx_wq");
	if (usb_tx_wq == NULL)
		return -1;

	usb_rx_wq = create_workqueue("usb_rx_wq");
	if (usb_rx_wq == NULL)
		return -1;

	return usb_register(&gdm_usb_lte_driver);
}

static void __exit gdm_usb_lte_exit(void)
{
	gdm_lte_event_exit();

	usb_deregister(&gdm_usb_lte_driver);

	if (usb_tx_wq) {
		flush_workqueue(usb_tx_wq);
		destroy_workqueue(usb_tx_wq);
	}

	if (usb_rx_wq) {
		flush_workqueue(usb_rx_wq);
		destroy_workqueue(usb_rx_wq);
	}
}

module_init(gdm_usb_lte_init);
module_exit(gdm_usb_lte_exit);

MODULE_VERSION(DRIVER_VERSION);
MODULE_DESCRIPTION("GCT LTE USB Device Driver");
MODULE_LICENSE("GPL");