1 /*
2 * u_serial.c - utilities for USB gadget "serial port"/TTY support
3 *
4 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
5 * Copyright (C) 2008 David Brownell
6 * Copyright (C) 2008 by Nokia Corporation
7 *
8 * This code also borrows from usbserial.c, which is
9 * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
10 * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
11 * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
12 *
13 * This software is distributed under the terms of the GNU General
14 * Public License ("GPL") as published by the Free Software Foundation,
15 * either version 2 of that License or (at your option) any later version.
16 */
17
18 /* #define VERBOSE_DEBUG */
19
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/interrupt.h>
23 #include <linux/device.h>
24 #include <linux/delay.h>
25 #include <linux/tty.h>
26 #include <linux/tty_flip.h>
27 #include <linux/slab.h>
28 #include <linux/export.h>
29 #include <linux/module.h>
30
31 #include "u_serial.h"
32
33
34 /*
35 * This component encapsulates the TTY layer glue needed to provide basic
36 * "serial port" functionality through the USB gadget stack. Each such
37 * port is exposed through a /dev/ttyGS* node.
38 *
39 * After this module has been loaded, the individual TTY port can be requested
40 * (gserial_alloc_line()) and it will stay available until they are removed
41 * (gserial_free_line()). Each one may be connected to a USB function
42 * (gserial_connect), or disconnected (with gserial_disconnect) when the USB
43 * host issues a config change event. Data can only flow when the port is
44 * connected to the host.
45 *
46 * A given TTY port can be made available in multiple configurations.
47 * For example, each one might expose a ttyGS0 node which provides a
48 * login application. In one case that might use CDC ACM interface 0,
49 * while another configuration might use interface 3 for that. The
50 * work to handle that (including descriptor management) is not part
51 * of this component.
52 *
53 * Configurations may expose more than one TTY port. For example, if
54 * ttyGS0 provides login service, then ttyGS1 might provide dialer access
55 * for a telephone or fax link. And ttyGS2 might be something that just
56 * needs a simple byte stream interface for some messaging protocol that
57 * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
58 *
59 *
60 * gserial is the lifecycle interface, used by USB functions
61 * gs_port is the I/O nexus, used by the tty driver
62 * tty_struct links to the tty/filesystem framework
63 *
64 * gserial <---> gs_port ... links will be null when the USB link is
65 * inactive; managed by gserial_{connect,disconnect}(). each gserial
66 * instance can wrap its own USB control protocol.
67 * gserial->ioport == usb_ep->driver_data ... gs_port
68 * gs_port->port_usb ... gserial
69 *
70 * gs_port <---> tty_struct ... links will be null when the TTY file
71 * isn't opened; managed by gs_open()/gs_close()
72 * gs_port->port.tty ... tty_struct
73 * tty_struct->driver_data ... gs_port
74 */
75
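/*
 * Illustrative lifecycle sketch (added comment, not compiled): a USB
 * function driver is expected to drive this component roughly as shown
 * below.  Only the gserial_* calls are the real API exported from this
 * file; the hypo_* names are hypothetical placeholders.
 *
 *	static struct gserial hypo_gser;
 *	static unsigned char hypo_line;
 *
 *	// function bind: claim a ttyGS* minor
 *	status = gserial_alloc_line(&hypo_line);
 *
 *	// set_alt(): endpoints chosen, descriptors selected
 *	hypo_gser.in = hypo_ep_in;
 *	hypo_gser.out = hypo_ep_out;
 *	status = gserial_connect(&hypo_gser, hypo_line);
 *
 *	// disable()/disconnect(): drop the USB side of the link
 *	gserial_disconnect(&hypo_gser);
 *
 *	// function unbind: release the ttyGS* minor
 *	gserial_free_line(hypo_line);
 */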
76 /* RX and TX queues can buffer QUEUE_SIZE packets before they hit the
77 * next layer of buffering. For TX that's a circular buffer; for RX
78 * consider it a NOP. A third layer is provided by the TTY code.
79 */
80 #define QUEUE_SIZE 16
81 #define WRITE_BUF_SIZE 8192 /* TX only */
82
83 /* circular buffer */
84 struct gs_buf {
85 unsigned buf_size;
86 char *buf_buf;
87 char *buf_get;
88 char *buf_put;
89 };
90
91 /*
92 * The port structure holds info for each port, one for each minor number
93 * (and thus for each /dev/ node).
94 */
95 struct gs_port {
96 struct tty_port port;
97 spinlock_t port_lock; /* guard port_* access */
98
99 struct gserial *port_usb;
100
101 bool openclose; /* open/close in progress */
102 u8 port_num;
103
104 struct list_head read_pool;
105 int read_started;
106 int read_allocated;
107 struct list_head read_queue;
108 unsigned n_read;
109 struct tasklet_struct push;
110
111 struct list_head write_pool;
112 int write_started;
113 int write_allocated;
114 struct gs_buf port_write_buf;
115 wait_queue_head_t drain_wait; /* wait while writes drain */
116
117 /* REVISIT this state ... */
118 struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
119 };
120
121 static struct portmaster {
122 struct mutex lock; /* protect open/close */
123 struct gs_port *port;
124 } ports[MAX_U_SERIAL_PORTS];
125
126 #define GS_CLOSE_TIMEOUT 15 /* seconds */
127
128
129
130 #ifdef VERBOSE_DEBUG
131 #ifndef pr_vdebug
132 #define pr_vdebug(fmt, arg...) \
133 pr_debug(fmt, ##arg)
134 #endif /* pr_vdebug */
135 #else
136 #ifndef pr_vdebug
137 #define pr_vdebug(fmt, arg...) \
138 ({ if (0) pr_debug(fmt, ##arg); })
139 #endif /* pr_vdebug */
140 #endif
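/*
 * Note (added comment): the "({ if (0) pr_debug(...); })" form above keeps
 * compile-time type checking of the format string and arguments even when
 * VERBOSE_DEBUG is not defined, while the dead branch is optimized away so
 * no code is emitted.
 */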
141
142 /*-------------------------------------------------------------------------*/
143
144 /* Circular Buffer */
145
146 /*
147 * gs_buf_alloc
148 *
149 * Allocate a circular buffer and all associated memory.
150 */
151 static int gs_buf_alloc(struct gs_buf *gb, unsigned size)
152 {
153 gb->buf_buf = kmalloc(size, GFP_KERNEL);
154 if (gb->buf_buf == NULL)
155 return -ENOMEM;
156
157 gb->buf_size = size;
158 gb->buf_put = gb->buf_buf;
159 gb->buf_get = gb->buf_buf;
160
161 return 0;
162 }
163
164 /*
165 * gs_buf_free
166 *
167 * Free the buffer and all associated memory.
168 */
169 static void gs_buf_free(struct gs_buf *gb)
170 {
171 kfree(gb->buf_buf);
172 gb->buf_buf = NULL;
173 }
174
175 /*
176 * gs_buf_clear
177 *
178 * Clear out all data in the circular buffer.
179 */
180 static void gs_buf_clear(struct gs_buf *gb)
181 {
182 gb->buf_get = gb->buf_put;
183 /* equivalent to a get of all data available */
184 }
185
186 /*
187 * gs_buf_data_avail
188 *
189 * Return the number of bytes of data written into the circular
190 * buffer.
191 */
192 static unsigned gs_buf_data_avail(struct gs_buf *gb)
193 {
194 return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
195 }
196
197 /*
198 * gs_buf_space_avail
199 *
200 * Return the number of bytes of space available in the circular
201 * buffer.
202 */
203 static unsigned gs_buf_space_avail(struct gs_buf *gb)
204 {
205 return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
206 }
207
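/*
 * Worked example of the index arithmetic above (added comment): with
 * buf_size == 8, buf_put == buf_buf + 6 and buf_get == buf_buf + 2,
 *
 *	data_avail  = (8 + 6 - 2) % 8     = 4
 *	space_avail = (8 + 2 - 6 - 1) % 8 = 3
 *
 * One slot is always left unused so that buf_put == buf_get can only
 * mean "empty", never "full"; usable capacity is buf_size - 1 bytes.
 */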
208 /*
209 * gs_buf_put
210 *
211 * Copy data from the given buffer and put it into the circular buffer.
212 * Restrict to the amount of space available.
213 *
214 * Return the number of bytes copied.
215 */
216 static unsigned
217 gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count)
218 {
219 unsigned len;
220
221 len = gs_buf_space_avail(gb);
222 if (count > len)
223 count = len;
224
225 if (count == 0)
226 return 0;
227
228 len = gb->buf_buf + gb->buf_size - gb->buf_put;
229 if (count > len) {
230 memcpy(gb->buf_put, buf, len);
231 memcpy(gb->buf_buf, buf+len, count - len);
232 gb->buf_put = gb->buf_buf + count - len;
233 } else {
234 memcpy(gb->buf_put, buf, count);
235 if (count < len)
236 gb->buf_put += count;
237 else /* count == len */
238 gb->buf_put = gb->buf_buf;
239 }
240
241 return count;
242 }
243
244 /*
245 * gs_buf_get
246 *
247 * Get data from the circular buffer and copy to the given buffer.
248 * Restrict to the amount of data available.
249 *
250 * Return the number of bytes copied.
251 */
252 static unsigned
253 gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
254 {
255 unsigned len;
256
257 len = gs_buf_data_avail(gb);
258 if (count > len)
259 count = len;
260
261 if (count == 0)
262 return 0;
263
264 len = gb->buf_buf + gb->buf_size - gb->buf_get;
265 if (count > len) {
266 memcpy(buf, gb->buf_get, len);
267 memcpy(buf+len, gb->buf_buf, count - len);
268 gb->buf_get = gb->buf_buf + count - len;
269 } else {
270 memcpy(buf, gb->buf_get, count);
271 if (count < len)
272 gb->buf_get += count;
273 else /* count == len */
274 gb->buf_get = gb->buf_buf;
275 }
276
277 return count;
278 }
279
280 /*-------------------------------------------------------------------------*/
281
282 /* I/O glue between TTY (upper) and USB function (lower) driver layers */
283
284 /*
285 * gs_alloc_req
286 *
287 * Allocate a usb_request and its buffer. Returns a pointer to the
288 * usb_request or NULL if there is an error.
289 */
290 struct usb_request *
291 gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
292 {
293 struct usb_request *req;
294
295 req = usb_ep_alloc_request(ep, kmalloc_flags);
296
297 if (req != NULL) {
298 req->length = len;
299 req->buf = kmalloc(len, kmalloc_flags);
300 if (req->buf == NULL) {
301 usb_ep_free_request(ep, req);
302 return NULL;
303 }
304 }
305
306 return req;
307 }
308 EXPORT_SYMBOL_GPL(gs_alloc_req);
309
310 /*
311 * gs_free_req
312 *
313 * Free a usb_request and its buffer.
314 */
315 void gs_free_req(struct usb_ep *ep, struct usb_request *req)
316 {
317 kfree(req->buf);
318 usb_ep_free_request(ep, req);
319 }
320 EXPORT_SYMBOL_GPL(gs_free_req);
321
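/*
 * Hedged usage example (added comment, not compiled): a function driver
 * that needs an extra request, e.g. for a notification/interrupt endpoint,
 * can reuse the two helpers above instead of open-coding the
 * usb_ep_alloc_request() + kmalloc() pair.  The notify_ep, notify_len and
 * hypo_notify_complete names are hypothetical.
 *
 *	struct usb_request *notify_req;
 *
 *	notify_req = gs_alloc_req(notify_ep, notify_len, GFP_ATOMIC);
 *	if (!notify_req)
 *		return -ENOMEM;
 *	notify_req->complete = hypo_notify_complete;
 *	...
 *	gs_free_req(notify_ep, notify_req);	// frees req->buf, then req
 */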
322 /*
323 * gs_send_packet
324 *
325 * If there is data to send, a packet is built in the given
326 * buffer and the size is returned. If there is no data to
327 * send, 0 is returned.
328 *
329 * Called with port_lock held.
330 */
331 static unsigned
332 gs_send_packet(struct gs_port *port, char *packet, unsigned size)
333 {
334 unsigned len;
335
336 len = gs_buf_data_avail(&port->port_write_buf);
337 if (len < size)
338 size = len;
339 if (size != 0)
340 size = gs_buf_get(&port->port_write_buf, packet, size);
341 return size;
342 }
343
344 /*
345 * gs_start_tx
346 *
347 * This function finds available write requests, calls
348 * gs_send_packet to fill these packets with data, and
349 * continues until either there are no more write requests
350 * available or no more data to send. This function is
351 * run whenever data arrives or write requests are available.
352 *
353 * Context: caller owns port_lock; port_usb is non-null.
354 */
355 static int gs_start_tx(struct gs_port *port)
356 /*
357 __releases(&port->port_lock)
358 __acquires(&port->port_lock)
359 */
360 {
361 struct list_head *pool = &port->write_pool;
362 struct usb_ep *in = port->port_usb->in;
363 int status = 0;
364 bool do_tty_wake = false;
365
366 while (!list_empty(pool)) {
367 struct usb_request *req;
368 int len;
369
370 if (port->write_started >= QUEUE_SIZE)
371 break;
372
373 req = list_entry(pool->next, struct usb_request, list);
374 len = gs_send_packet(port, req->buf, in->maxpacket);
375 if (len == 0) {
376 wake_up_interruptible(&port->drain_wait);
377 break;
378 }
379 do_tty_wake = true;
380
381 req->length = len;
382 list_del(&req->list);
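/* Request a trailing ZLP only when the circular buffer has just drained:
 * if this transfer happens to be an exact multiple of maxpacket, the
 * zero-length packet tells the host that the buffered data has ended.
 * (added comment)
 */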
383 req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);
384
385 pr_vdebug("ttyGS%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
386 port->port_num, len, *((u8 *)req->buf),
387 *((u8 *)req->buf+1), *((u8 *)req->buf+2));
388
389 /* Drop lock while we call out of driver; completions
390 * could be issued while we do so. Disconnection may
391 * happen too; maybe immediately before we queue this!
392 *
393 * NOTE that we may keep sending data for a while after
394 * the TTY closed (port->port.tty is NULL).
395 */
396 spin_unlock(&port->port_lock);
397 status = usb_ep_queue(in, req, GFP_ATOMIC);
398 spin_lock(&port->port_lock);
399
400 if (status) {
401 pr_debug("%s: %s %s err %d\n",
402 __func__, "queue", in->name, status);
403 list_add(&req->list, pool);
404 break;
405 }
406
407 port->write_started++;
408
409 /* abort immediately after disconnect */
410 if (!port->port_usb)
411 break;
412 }
413
414 if (do_tty_wake && port->port.tty)
415 tty_wakeup(port->port.tty);
416 return status;
417 }
418
419 /*
420 * Context: caller owns port_lock, and port_usb is set
421 */
422 static unsigned gs_start_rx(struct gs_port *port)
423 /*
424 __releases(&port->port_lock)
425 __acquires(&port->port_lock)
426 */
427 {
428 struct list_head *pool = &port->read_pool;
429 struct usb_ep *out = port->port_usb->out;
430
431 while (!list_empty(pool)) {
432 struct usb_request *req;
433 int status;
434 struct tty_struct *tty;
435
436 /* no more rx if closed */
437 tty = port->port.tty;
438 if (!tty)
439 break;
440
441 if (port->read_started >= QUEUE_SIZE)
442 break;
443
444 req = list_entry(pool->next, struct usb_request, list);
445 list_del(&req->list);
446 req->length = out->maxpacket;
447
448 /* drop lock while we call out; the controller driver
449 * may need to call us back (e.g. for disconnect)
450 */
451 spin_unlock(&port->port_lock);
452 status = usb_ep_queue(out, req, GFP_ATOMIC);
453 spin_lock(&port->port_lock);
454
455 if (status) {
456 pr_debug("%s: %s %s err %d\n",
457 __func__, "queue", out->name, status);
458 list_add(&req->list, pool);
459 break;
460 }
461 port->read_started++;
462
463 /* abort immediately after disconnect */
464 if (!port->port_usb)
465 break;
466 }
467 return port->read_started;
468 }
469
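/*
 * Back-of-the-envelope sizing (added comment; assumes high-speed bulk
 * endpoints with 512-byte maxpacket): the driver itself can queue up to
 * QUEUE_SIZE (16) * 512 bytes = 8 KB of RX data in usb_requests, plus
 * whatever the endpoint FIFO holds (usually two more packets), before it
 * leans on the TTY layer's own buffering (currently about 64 KB) and,
 * ultimately, on NAKing OUT transfers.
 */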
470 /*
471 * RX tasklet takes data out of the RX queue and hands it up to the TTY
472 * layer until it refuses to take any more data (or is throttled back).
473 * Then it issues reads for any further data.
474 *
475 * If the RX queue becomes full enough that no usb_request is queued,
476 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
477 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
478 * can be buffered here before data reaches the TTY layer's buffers (currently 64 KB).
479 */
480 static void gs_rx_push(unsigned long _port)
481 {
482 struct gs_port *port = (void *)_port;
483 struct tty_struct *tty;
484 struct list_head *queue = &port->read_queue;
485 bool disconnect = false;
486 bool do_push = false;
487
488 /* hand any queued data to the tty */
489 spin_lock_irq(&port->port_lock);
490 tty = port->port.tty;
491 while (!list_empty(queue)) {
492 struct usb_request *req;
493
494 req = list_first_entry(queue, struct usb_request, list);
495
496 /* leave data queued if tty was rx throttled */
497 if (tty && test_bit(TTY_THROTTLED, &tty->flags))
498 break;
499
500 switch (req->status) {
501 case -ESHUTDOWN:
502 disconnect = true;
503 pr_vdebug("ttyGS%d: shutdown\n", port->port_num);
504 break;
505
506 default:
507 /* presumably a transient fault */
508 pr_warn("ttyGS%d: unexpected RX status %d\n",
509 port->port_num, req->status);
510 /* FALLTHROUGH */
511 case 0:
512 /* normal completion */
513 break;
514 }
515
516 /* push data to (open) tty */
517 if (req->actual) {
518 char *packet = req->buf;
519 unsigned size = req->actual;
520 unsigned n;
521 int count;
522
523 /* we may have pushed part of this packet already... */
524 n = port->n_read;
525 if (n) {
526 packet += n;
527 size -= n;
528 }
529
530 count = tty_insert_flip_string(&port->port, packet,
531 size);
532 if (count)
533 do_push = true;
534 if (count != size) {
535 /* stop pushing; TTY layer can't handle more */
536 port->n_read += count;
537 pr_vdebug("ttyGS%d: rx block %d/%d\n",
538 port->port_num, count, req->actual);
539 break;
540 }
541 port->n_read = 0;
542 }
543
544 list_move(&req->list, &port->read_pool);
545 port->read_started--;
546 }
547
548 /* Push from tty to ldisc; this is handled by a workqueue,
549 * so we won't get callbacks and can hold port_lock
550 */
551 if (do_push)
552 tty_flip_buffer_push(&port->port);
553
554
555 /* We want our data queue to become empty ASAP, keeping data
556 * in the tty and ldisc (not here). If we couldn't push any
557 * this time around, there may be trouble unless there's an
558 * implicit tty_unthrottle() call on its way...
559 *
560 * REVISIT we should probably add a timer to keep the tasklet
561 * from starving ... but it's not clear that case ever happens.
562 */
563 if (!list_empty(queue) && tty) {
564 if (!test_bit(TTY_THROTTLED, &tty->flags)) {
565 if (do_push)
566 tasklet_schedule(&port->push);
567 else
568 pr_warn("ttyGS%d: RX not scheduled?\n",
569 port->port_num);
570 }
571 }
572
573 /* If we're still connected, refill the USB RX queue. */
574 if (!disconnect && port->port_usb)
575 gs_start_rx(port);
576
577 spin_unlock_irq(&port->port_lock);
578 }
579
580 static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
581 {
582 struct gs_port *port = ep->driver_data;
583
584 /* Queue all received data until the tty layer is ready for it. */
585 spin_lock(&port->port_lock);
586 list_add_tail(&req->list, &port->read_queue);
587 tasklet_schedule(&port->push);
588 spin_unlock(&port->port_lock);
589 }
590
591 static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
592 {
593 struct gs_port *port = ep->driver_data;
594
595 spin_lock(&port->port_lock);
596 list_add(&req->list, &port->write_pool);
597 port->write_started--;
598
599 switch (req->status) {
600 default:
601 /* presumably a transient fault */
602 pr_warn("%s: unexpected %s status %d\n",
603 __func__, ep->name, req->status);
604 /* FALL THROUGH */
605 case 0:
606 /* normal completion */
607 gs_start_tx(port);
608 break;
609
610 case -ESHUTDOWN:
611 /* disconnect */
612 pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
613 break;
614 }
615
616 spin_unlock(&port->port_lock);
617 }
618
619 static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
620 int *allocated)
621 {
622 struct usb_request *req;
623
624 while (!list_empty(head)) {
625 req = list_entry(head->next, struct usb_request, list);
626 list_del(&req->list);
627 gs_free_req(ep, req);
628 if (allocated)
629 (*allocated)--;
630 }
631 }
632
633 static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
634 void (*fn)(struct usb_ep *, struct usb_request *),
635 int *allocated)
636 {
637 int i;
638 struct usb_request *req;
639 int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
640
641 /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
642 * do quite that many this time, don't fail ... we just won't
643 * be as speedy as we might otherwise be.
644 */
645 for (i = 0; i < n; i++) {
646 req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
647 if (!req)
648 return list_empty(head) ? -ENOMEM : 0;
649 req->complete = fn;
650 list_add_tail(&req->list, head);
651 if (allocated)
652 (*allocated)++;
653 }
654 return 0;
655 }
656
657 /**
658 * gs_start_io - start USB I/O streams
659 * @dev: encapsulates endpoints to use
660 * Context: holding port_lock; port_tty and port_usb are non-null
661 *
662 * We only start I/O when something is connected to both sides of
663 * this port. If nothing is listening on the host side, we may
664 * be pointlessly filling up our TX buffers and FIFO.
665 */
666 static int gs_start_io(struct gs_port *port)
667 {
668 struct list_head *head = &port->read_pool;
669 struct usb_ep *ep = port->port_usb->out;
670 int status;
671 unsigned started;
672
673 /* Allocate RX and TX I/O buffers. We can't easily do this much
674 * earlier (with GFP_KERNEL) because the requests are coupled to
675 * endpoints, as are the packet sizes we'll be using. Different
676 * configurations may use different endpoints with a given port;
677 * and high speed vs full speed changes packet sizes too.
678 */
679 status = gs_alloc_requests(ep, head, gs_read_complete,
680 &port->read_allocated);
681 if (status)
682 return status;
683
684 status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
685 gs_write_complete, &port->write_allocated);
686 if (status) {
687 gs_free_requests(ep, head, &port->read_allocated);
688 return status;
689 }
690
691 /* queue read requests */
692 port->n_read = 0;
693 started = gs_start_rx(port);
694
695 /* unblock any pending writes into our circular buffer */
696 if (started) {
697 tty_wakeup(port->port.tty);
698 } else {
699 gs_free_requests(ep, head, &port->read_allocated);
700 gs_free_requests(port->port_usb->in, &port->write_pool,
701 &port->write_allocated);
702 status = -EIO;
703 }
704
705 return status;
706 }
707
708 /*-------------------------------------------------------------------------*/
709
710 /* TTY Driver */
711
712 /*
713 * gs_open sets up the link between a gs_port and its associated TTY.
714 * That link is broken *only* by TTY close(), and all driver methods
715 * know that.
716 */
717 static int gs_open(struct tty_struct *tty, struct file *file)
718 {
719 int port_num = tty->index;
720 struct gs_port *port;
721 int status;
722
723 do {
724 mutex_lock(&ports[port_num].lock);
725 port = ports[port_num].port;
726 if (!port)
727 status = -ENODEV;
728 else {
729 spin_lock_irq(&port->port_lock);
730
731 /* already open? Great. */
732 if (port->port.count) {
733 status = 0;
734 port->port.count++;
735
736 /* currently opening/closing? wait ... */
737 } else if (port->openclose) {
738 status = -EBUSY;
739
740 /* ... else we do the work */
741 } else {
742 status = -EAGAIN;
743 port->openclose = true;
744 }
745 spin_unlock_irq(&port->port_lock);
746 }
747 mutex_unlock(&ports[port_num].lock);
748
749 switch (status) {
750 default:
751 /* fully handled */
752 return status;
753 case -EAGAIN:
754 /* must do the work */
755 break;
756 case -EBUSY:
757 /* wait for EAGAIN task to finish */
758 msleep(1);
759 /* REVISIT could have a waitchannel here, if
760 * concurrent open performance is important
761 */
762 break;
763 }
764 } while (status != -EAGAIN);
765
766 /* Do the "real open" */
767 spin_lock_irq(&port->port_lock);
768
769 /* allocate circular buffer on first open */
770 if (port->port_write_buf.buf_buf == NULL) {
771
772 spin_unlock_irq(&port->port_lock);
773 status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE);
774 spin_lock_irq(&port->port_lock);
775
776 if (status) {
777 pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
778 port->port_num, tty, file);
779 port->openclose = false;
780 goto exit_unlock_port;
781 }
782 }
783
784 /* REVISIT if REMOVED (ports[].port NULL), abort the open
785 * to let rmmod work faster (but this way isn't wrong).
786 */
787
788 /* REVISIT maybe wait for "carrier detect" */
789
790 tty->driver_data = port;
791 port->port.tty = tty;
792
793 port->port.count = 1;
794 port->openclose = false;
795
796 /* if connected, start the I/O stream */
797 if (port->port_usb) {
798 struct gserial *gser = port->port_usb;
799
800 pr_debug("gs_open: start ttyGS%d\n", port->port_num);
801 gs_start_io(port);
802
803 if (gser->connect)
804 gser->connect(gser);
805 }
806
807 pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
808
809 status = 0;
810
811 exit_unlock_port:
812 spin_unlock_irq(&port->port_lock);
813 return status;
814 }
815
816 static int gs_writes_finished(struct gs_port *p)
817 {
818 int cond;
819
820 /* return true on disconnect or empty buffer */
821 spin_lock_irq(&p->port_lock);
822 cond = (p->port_usb == NULL) || !gs_buf_data_avail(&p->port_write_buf);
823 spin_unlock_irq(&p->port_lock);
824
825 return cond;
826 }
827
828 static void gs_close(struct tty_struct *tty, struct file *file)
829 {
830 struct gs_port *port = tty->driver_data;
831 struct gserial *gser;
832
833 spin_lock_irq(&port->port_lock);
834
835 if (port->port.count != 1) {
836 if (port->port.count == 0)
837 WARN_ON(1);
838 else
839 --port->port.count;
840 goto exit;
841 }
842
843 pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);
844
845 /* mark port as closing but in use; we can drop port lock
846 * and sleep if necessary
847 */
848 port->openclose = true;
849 port->port.count = 0;
850
851 gser = port->port_usb;
852 if (gser && gser->disconnect)
853 gser->disconnect(gser);
854
855 /* wait for circular write buffer to drain, disconnect, or at
856 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
857 */
858 if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
859 spin_unlock_irq(&port->port_lock);
860 wait_event_interruptible_timeout(port->drain_wait,
861 gs_writes_finished(port),
862 GS_CLOSE_TIMEOUT * HZ);
863 spin_lock_irq(&port->port_lock);
864 gser = port->port_usb;
865 }
866
867 /* Iff we're disconnected, there can be no I/O in flight so it's
868 * ok to free the circular buffer; else just scrub it. And don't
869 * let the push tasklet fire again until we're re-opened.
870 */
871 if (gser == NULL)
872 gs_buf_free(&port->port_write_buf);
873 else
874 gs_buf_clear(&port->port_write_buf);
875
876 tty->driver_data = NULL;
877 port->port.tty = NULL;
878
879 port->openclose = false;
880
881 pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
882 port->port_num, tty, file);
883
884 wake_up(&port->port.close_wait);
885 exit:
886 spin_unlock_irq(&port->port_lock);
887 }
888
889 static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
890 {
891 struct gs_port *port = tty->driver_data;
892 unsigned long flags;
893 int status;
894
895 pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
896 port->port_num, tty, count);
897
898 spin_lock_irqsave(&port->port_lock, flags);
899 if (count)
900 count = gs_buf_put(&port->port_write_buf, buf, count);
901 /* treat count == 0 as flush_chars() */
902 if (port->port_usb)
903 status = gs_start_tx(port);
904 spin_unlock_irqrestore(&port->port_lock, flags);
905
906 return count;
907 }
908
909 static int gs_put_char(struct tty_struct *tty, unsigned char ch)
910 {
911 struct gs_port *port = tty->driver_data;
912 unsigned long flags;
913 int status;
914
915 pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %pf\n",
916 port->port_num, tty, ch, __builtin_return_address(0));
917
918 spin_lock_irqsave(&port->port_lock, flags);
919 status = gs_buf_put(&port->port_write_buf, &ch, 1);
920 spin_unlock_irqrestore(&port->port_lock, flags);
921
922 return status;
923 }
924
925 static void gs_flush_chars(struct tty_struct *tty)
926 {
927 struct gs_port *port = tty->driver_data;
928 unsigned long flags;
929
930 pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);
931
932 spin_lock_irqsave(&port->port_lock, flags);
933 if (port->port_usb)
934 gs_start_tx(port);
935 spin_unlock_irqrestore(&port->port_lock, flags);
936 }
937
938 static int gs_write_room(struct tty_struct *tty)
939 {
940 struct gs_port *port = tty->driver_data;
941 unsigned long flags;
942 int room = 0;
943
944 spin_lock_irqsave(&port->port_lock, flags);
945 if (port->port_usb)
946 room = gs_buf_space_avail(&port->port_write_buf);
947 spin_unlock_irqrestore(&port->port_lock, flags);
948
949 pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
950 port->port_num, tty, room);
951
952 return room;
953 }
954
955 static int gs_chars_in_buffer(struct tty_struct *tty)
956 {
957 struct gs_port *port = tty->driver_data;
958 unsigned long flags;
959 int chars = 0;
960
961 spin_lock_irqsave(&port->port_lock, flags);
962 chars = gs_buf_data_avail(&port->port_write_buf);
963 spin_unlock_irqrestore(&port->port_lock, flags);
964
965 pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
966 port->port_num, tty, chars);
967
968 return chars;
969 }
970
971 /* undo side effects of setting TTY_THROTTLED */
972 static void gs_unthrottle(struct tty_struct *tty)
973 {
974 struct gs_port *port = tty->driver_data;
975 unsigned long flags;
976
977 spin_lock_irqsave(&port->port_lock, flags);
978 if (port->port_usb) {
979 /* Kickstart read queue processing. We don't do xon/xoff,
980 * rts/cts, or other handshaking with the host, but if the
981 * read queue backs up enough we'll be NAKing OUT packets.
982 */
983 tasklet_schedule(&port->push);
984 pr_vdebug("ttyGS%d: unthrottle\n", port->port_num);
985 }
986 spin_unlock_irqrestore(&port->port_lock, flags);
987 }
988
989 static int gs_break_ctl(struct tty_struct *tty, int duration)
990 {
991 struct gs_port *port = tty->driver_data;
992 int status = 0;
993 struct gserial *gser;
994
995 pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d)\n",
996 port->port_num, duration);
997
998 spin_lock_irq(&port->port_lock);
999 gser = port->port_usb;
1000 if (gser && gser->send_break)
1001 status = gser->send_break(gser, duration);
1002 spin_unlock_irq(&port->port_lock);
1003
1004 return status;
1005 }
1006
1007 static const struct tty_operations gs_tty_ops = {
1008 .open = gs_open,
1009 .close = gs_close,
1010 .write = gs_write,
1011 .put_char = gs_put_char,
1012 .flush_chars = gs_flush_chars,
1013 .write_room = gs_write_room,
1014 .chars_in_buffer = gs_chars_in_buffer,
1015 .unthrottle = gs_unthrottle,
1016 .break_ctl = gs_break_ctl,
1017 };
1018
1019 /*-------------------------------------------------------------------------*/
1020
1021 static struct tty_driver *gs_tty_driver;
1022
1023 static int
1024 gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
1025 {
1026 struct gs_port *port;
1027 int ret = 0;
1028
1029 mutex_lock(&ports[port_num].lock);
1030 if (ports[port_num].port) {
1031 ret = -EBUSY;
1032 goto out;
1033 }
1034
1035 port = kzalloc(sizeof(struct gs_port), GFP_KERNEL);
1036 if (port == NULL) {
1037 ret = -ENOMEM;
1038 goto out;
1039 }
1040
1041 tty_port_init(&port->port);
1042 spin_lock_init(&port->port_lock);
1043 init_waitqueue_head(&port->drain_wait);
1044
1045 tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
1046
1047 INIT_LIST_HEAD(&port->read_pool);
1048 INIT_LIST_HEAD(&port->read_queue);
1049 INIT_LIST_HEAD(&port->write_pool);
1050
1051 port->port_num = port_num;
1052 port->port_line_coding = *coding;
1053
1054 ports[port_num].port = port;
1055 out:
1056 mutex_unlock(&ports[port_num].lock);
1057 return ret;
1058 }
1059
1060 static int gs_closed(struct gs_port *port)
1061 {
1062 int cond;
1063
1064 spin_lock_irq(&port->port_lock);
1065 cond = (port->port.count == 0) && !port->openclose;
1066 spin_unlock_irq(&port->port_lock);
1067 return cond;
1068 }
1069
1070 static void gserial_free_port(struct gs_port *port)
1071 {
1072 tasklet_kill(&port->push);
1073 /* wait for old opens to finish */
1074 wait_event(port->port.close_wait, gs_closed(port));
1075 WARN_ON(port->port_usb != NULL);
1076 tty_port_destroy(&port->port);
1077 kfree(port);
1078 }
1079
1080 void gserial_free_line(unsigned char port_num)
1081 {
1082 struct gs_port *port;
1083
1084 mutex_lock(&ports[port_num].lock);
1085 if (WARN_ON(!ports[port_num].port)) {
1086 mutex_unlock(&ports[port_num].lock);
1087 return;
1088 }
1089 port = ports[port_num].port;
1090 ports[port_num].port = NULL;
1091 mutex_unlock(&ports[port_num].lock);
1092
1093 gserial_free_port(port);
1094 tty_unregister_device(gs_tty_driver, port_num);
1095 }
1096 EXPORT_SYMBOL_GPL(gserial_free_line);
1097
1098 int gserial_alloc_line(unsigned char *line_num)
1099 {
1100 struct usb_cdc_line_coding coding;
1101 struct device *tty_dev;
1102 int ret;
1103 int port_num;
1104
1105 coding.dwDTERate = cpu_to_le32(9600);
1106 coding.bCharFormat = USB_CDC_1_STOP_BITS;
1107 coding.bParityType = USB_CDC_NO_PARITY;
1108 coding.bDataBits = 8;
1109
1110 for (port_num = 0; port_num < MAX_U_SERIAL_PORTS; port_num++) {
1111 ret = gs_port_alloc(port_num, &coding);
1112 if (ret == -EBUSY)
1113 continue;
1114 if (ret)
1115 return ret;
1116 break;
1117 }
1118 if (ret)
1119 return ret;
1120
1121 /* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
1122
1123 tty_dev = tty_port_register_device(&ports[port_num].port->port,
1124 gs_tty_driver, port_num, NULL);
1125 if (IS_ERR(tty_dev)) {
1126 struct gs_port *port;
1127 pr_err("%s: failed to register tty for port %d, err %ld\n",
1128 __func__, port_num, PTR_ERR(tty_dev));
1129
1130 ret = PTR_ERR(tty_dev);
1131 port = ports[port_num].port;
1132 ports[port_num].port = NULL;
1133 gserial_free_port(port);
1134 goto err;
1135 }
1136 *line_num = port_num;
1137 err:
1138 return ret;
1139 }
1140 EXPORT_SYMBOL_GPL(gserial_alloc_line);
1141
1142 /**
1143 * gserial_connect - notify TTY I/O glue that USB link is active
1144 * @gser: the function, set up with endpoints and descriptors
1145 * @port_num: which port is active
1146 * Context: any (usually from irq)
1147 *
1148 * This is called to activate endpoints and let the TTY layer know that
1149 * the connection is active ... not unlike "carrier detect". It won't
1150 * necessarily start I/O queues; unless the TTY is held open by any
1151 * task, there would be no point. However, the endpoints will be
1152 * activated so the USB host can perform I/O, subject to basic USB
1153 * hardware flow control.
1154 *
1155 * Caller needs to have set up the endpoints and USB function in @dev
1156 * before calling this, as well as the appropriate (speed-specific)
1157 * endpoint descriptors, and also have allocated @port_num by calling
1158 * gserial_alloc_line().
1159 *
1160 * Returns negative errno or zero.
1161 * On success, ep->driver_data will be overwritten.
1162 */
1163 int gserial_connect(struct gserial *gser, u8 port_num)
1164 {
1165 struct gs_port *port;
1166 unsigned long flags;
1167 int status;
1168
1169 if (port_num >= MAX_U_SERIAL_PORTS)
1170 return -ENXIO;
1171
1172 port = ports[port_num].port;
1173 if (!port) {
1174 pr_err("serial line %d not allocated.\n", port_num);
1175 return -EINVAL;
1176 }
1177 if (port->port_usb) {
1178 pr_err("serial line %d is in use.\n", port_num);
1179 return -EBUSY;
1180 }
1181
1182 /* activate the endpoints */
1183 status = usb_ep_enable(gser->in);
1184 if (status < 0)
1185 return status;
1186 gser->in->driver_data = port;
1187
1188 status = usb_ep_enable(gser->out);
1189 if (status < 0)
1190 goto fail_out;
1191 gser->out->driver_data = port;
1192
1193 /* then tell the tty glue that I/O can work */
1194 spin_lock_irqsave(&port->port_lock, flags);
1195 gser->ioport = port;
1196 port->port_usb = gser;
1197
1198 /* REVISIT unclear how best to handle this state...
1199 * we don't really couple it with the Linux TTY.
1200 */
1201 gser->port_line_coding = port->port_line_coding;
1202
1203 /* REVISIT if waiting on "carrier detect", signal. */
1204
1205 /* if it's already open, start I/O ... and notify the serial
1206 * protocol about open/close status (connect/disconnect).
1207 */
1208 if (port->port.count) {
1209 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
1210 gs_start_io(port);
1211 if (gser->connect)
1212 gser->connect(gser);
1213 } else {
1214 if (gser->disconnect)
1215 gser->disconnect(gser);
1216 }
1217
1218 spin_unlock_irqrestore(&port->port_lock, flags);
1219
1220 return status;
1221
1222 fail_out:
1223 usb_ep_disable(gser->in);
1224 gser->in->driver_data = NULL;
1225 return status;
1226 }
1227 EXPORT_SYMBOL_GPL(gserial_connect);
1228 /**
1229 * gserial_disconnect - notify TTY I/O glue that USB link is inactive
1230 * @gser: the function, on which gserial_connect() was called
1231 * Context: any (usually from irq)
1232 *
1233 * This is called to deactivate endpoints and let the TTY layer know
1234 * that the connection went inactive ... not unlike "hangup".
1235 *
1236 * On return, the state is as if gserial_connect() had never been called;
1237 * there is no active USB I/O on these endpoints.
1238 */
1239 void gserial_disconnect(struct gserial *gser)
1240 {
1241 struct gs_port *port = gser->ioport;
1242 unsigned long flags;
1243
1244 if (!port)
1245 return;
1246
1247 /* tell the TTY glue not to do I/O here any more */
1248 spin_lock_irqsave(&port->port_lock, flags);
1249
1250 /* REVISIT as above: how best to track this? */
1251 port->port_line_coding = gser->port_line_coding;
1252
1253 port->port_usb = NULL;
1254 gser->ioport = NULL;
1255 if (port->port.count > 0 || port->openclose) {
1256 wake_up_interruptible(&port->drain_wait);
1257 if (port->port.tty)
1258 tty_hangup(port->port.tty);
1259 }
1260 spin_unlock_irqrestore(&port->port_lock, flags);
1261
1262 /* disable endpoints, aborting down any active I/O */
1263 usb_ep_disable(gser->out);
1264 gser->out->driver_data = NULL;
1265
1266 usb_ep_disable(gser->in);
1267 gser->in->driver_data = NULL;
1268
1269 /* finally, free any unused/unusable I/O buffers */
1270 spin_lock_irqsave(&port->port_lock, flags);
1271 if (port->port.count == 0 && !port->openclose)
1272 gs_buf_free(&port->port_write_buf);
1273 gs_free_requests(gser->out, &port->read_pool, NULL);
1274 gs_free_requests(gser->out, &port->read_queue, NULL);
1275 gs_free_requests(gser->in, &port->write_pool, NULL);
1276
1277 port->read_allocated = port->read_started =
1278 port->write_allocated = port->write_started = 0;
1279
1280 spin_unlock_irqrestore(&port->port_lock, flags);
1281 }
1282 EXPORT_SYMBOL_GPL(gserial_disconnect);
1283
1284 static int userial_init(void)
1285 {
1286 unsigned i;
1287 int status;
1288
1289 gs_tty_driver = alloc_tty_driver(MAX_U_SERIAL_PORTS);
1290 if (!gs_tty_driver)
1291 return -ENOMEM;
1292
1293 gs_tty_driver->driver_name = "g_serial";
1294 gs_tty_driver->name = "ttyGS";
1295 /* uses dynamically assigned dev_t values */
1296
1297 gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
1298 gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
1299 gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
1300 gs_tty_driver->init_termios = tty_std_termios;
1301
1302 /* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
1303 * MS-Windows. Otherwise, most of these flags shouldn't affect
1304 * anything unless we were to actually hook up to a serial line.
1305 */
1306 gs_tty_driver->init_termios.c_cflag =
1307 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
1308 gs_tty_driver->init_termios.c_ispeed = 9600;
1309 gs_tty_driver->init_termios.c_ospeed = 9600;
1310
1311 tty_set_operations(gs_tty_driver, &gs_tty_ops);
1312 for (i = 0; i < MAX_U_SERIAL_PORTS; i++)
1313 mutex_init(&ports[i].lock);
1314
1315 /* export the driver ... */
1316 status = tty_register_driver(gs_tty_driver);
1317 if (status) {
1318 pr_err("%s: cannot register, err %d\n",
1319 __func__, status);
1320 goto fail;
1321 }
1322
1323 pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
1324 MAX_U_SERIAL_PORTS,
1325 (MAX_U_SERIAL_PORTS == 1) ? "" : "s");
1326
1327 return status;
1328 fail:
1329 put_tty_driver(gs_tty_driver);
1330 gs_tty_driver = NULL;
1331 return status;
1332 }
1333 module_init(userial_init);
1334
1335 static void userial_cleanup(void)
1336 {
1337 tty_unregister_driver(gs_tty_driver);
1338 put_tty_driver(gs_tty_driver);
1339 gs_tty_driver = NULL;
1340 }
1341 module_exit(userial_cleanup);
1342
1343 MODULE_LICENSE("GPL");