serial: jsm: Convert jsm_printk to jsm_dbg
drivers/tty/serial/jsm/jsm_tty.c
/************************************************************************
 * Copyright 2003 Digi International (www.digi.com)
 *
 * Copyright (C) 2004 IBM Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA.
 *
 * Contact Information:
 * Scott H Kilau <Scott_Kilau@digi.com>
 * Ananda Venkatarman <mansarov@us.ibm.com>
 * Modifications:
 * 01/19/06: changed jsm_input routine to use the dynamically allocated
 *           tty_buffer changes. Contributors: Scott Kilau and Ananda V.
 ***********************************************************************/
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <linux/delay.h>	/* For udelay */
#include <linux/pci.h>
#include <linux/slab.h>

#include "jsm.h"

/* Bitmap of uart line numbers already claimed by jsm ports */
static DECLARE_BITMAP(linemap, MAXLINES);

static void jsm_carrier(struct jsm_channel *ch);

/*
 * Build a TIOCM_* bitmask from the channel's cached modem
 * control/status bits.
 */
static inline int jsm_get_mstat(struct jsm_channel *ch)
{
	unsigned char mstat;
	unsigned result;

	jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "start\n");

	mstat = (ch->ch_mostat | ch->ch_mistat);

	result = 0;

	if (mstat & UART_MCR_DTR)
		result |= TIOCM_DTR;
	if (mstat & UART_MCR_RTS)
		result |= TIOCM_RTS;
	if (mstat & UART_MSR_CTS)
		result |= TIOCM_CTS;
	if (mstat & UART_MSR_DSR)
		result |= TIOCM_DSR;
	if (mstat & UART_MSR_RI)
		result |= TIOCM_RI;
	if (mstat & UART_MSR_DCD)
		result |= TIOCM_CD;

	jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "finish\n");
	return result;
}

static unsigned int jsm_tty_tx_empty(struct uart_port *port)
{
	return TIOCSER_TEMT;
}

/*
 * Return modem signals to ld.
 */
static unsigned int jsm_tty_get_mctrl(struct uart_port *port)
{
	int result;
	struct jsm_channel *channel = (struct jsm_channel *)port;

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");

	result = jsm_get_mstat(channel);

	if (result < 0)
		return -ENXIO;

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n");

	return result;
}

/*
 * jsm_set_modem_info()
 *
 * Set modem signals, called by ld.
 */
static void jsm_tty_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct jsm_channel *channel = (struct jsm_channel *)port;

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");

	if (mctrl & TIOCM_RTS)
		channel->ch_mostat |= UART_MCR_RTS;
	else
		channel->ch_mostat &= ~UART_MCR_RTS;

	if (mctrl & TIOCM_DTR)
		channel->ch_mostat |= UART_MCR_DTR;
	else
		channel->ch_mostat &= ~UART_MCR_DTR;

	channel->ch_bd->bd_ops->assert_modem_signals(channel);

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n");
	udelay(10);
}

/*
 * jsm_tty_write()
 *
 * Take data from the user or kernel and send it out to the FEP.
 * In here exists all the Transparent Print magic as well.
 */
static void jsm_tty_write(struct uart_port *port)
{
	struct jsm_channel *channel;
	channel = container_of(port, struct jsm_channel, uart_port);
	channel->ch_bd->bd_ops->copy_data_from_queue_to_uart(channel);
}

static void jsm_tty_start_tx(struct uart_port *port)
{
	struct jsm_channel *channel = (struct jsm_channel *)port;

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");

	channel->ch_flags &= ~(CH_STOP);
	jsm_tty_write(port);

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n");
}

static void jsm_tty_stop_tx(struct uart_port *port)
{
	struct jsm_channel *channel = (struct jsm_channel *)port;

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");

	channel->ch_flags |= (CH_STOP);

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n");
}

static void jsm_tty_send_xchar(struct uart_port *port, char ch)
{
	unsigned long lock_flags;
	struct jsm_channel *channel = (struct jsm_channel *)port;
	struct ktermios *termios;

	spin_lock_irqsave(&port->lock, lock_flags);
	termios = &port->state->port.tty->termios;
	if (ch == termios->c_cc[VSTART])
		channel->ch_bd->bd_ops->send_start_character(channel);

	if (ch == termios->c_cc[VSTOP])
		channel->ch_bd->bd_ops->send_stop_character(channel);
	spin_unlock_irqrestore(&port->lock, lock_flags);
}

static void jsm_tty_stop_rx(struct uart_port *port)
{
	struct jsm_channel *channel = (struct jsm_channel *)port;

	channel->ch_bd->bd_ops->disable_receiver(channel);
}

static void jsm_tty_enable_ms(struct uart_port *port)
{
	/* Nothing needed */
}

static void jsm_tty_break(struct uart_port *port, int break_state)
{
	unsigned long lock_flags;
	struct jsm_channel *channel = (struct jsm_channel *)port;

	spin_lock_irqsave(&port->lock, lock_flags);
	if (break_state == -1)
		channel->ch_bd->bd_ops->send_break(channel);
	else
		channel->ch_bd->bd_ops->clear_break(channel, 0);

	spin_unlock_irqrestore(&port->lock, lock_flags);
}

static int jsm_tty_open(struct uart_port *port)
{
	struct jsm_board *brd;
	struct jsm_channel *channel = (struct jsm_channel *)port;
	struct ktermios *termios;

	/* Get board pointer from our array of majors we have allocated */
	brd = channel->ch_bd;

	/*
	 * Allocate channel buffers for read/write/error.
	 * Set flag, so we don't get trounced on.
	 */
	channel->ch_flags |= (CH_OPENING);

	/* Drop locks, as malloc with GFP_KERNEL can sleep */

	if (!channel->ch_rqueue) {
		channel->ch_rqueue = kzalloc(RQUEUESIZE, GFP_KERNEL);
		if (!channel->ch_rqueue) {
			jsm_dbg(INIT, &channel->ch_bd->pci_dev,
				"unable to allocate read queue buf\n");
			return -ENOMEM;
		}
	}
	if (!channel->ch_equeue) {
		channel->ch_equeue = kzalloc(EQUEUESIZE, GFP_KERNEL);
		if (!channel->ch_equeue) {
			jsm_dbg(INIT, &channel->ch_bd->pci_dev,
				"unable to allocate error queue buf\n");
			return -ENOMEM;
		}
	}

	channel->ch_flags &= ~(CH_OPENING);
	/*
	 * Initialize if neither terminal is open.
	 */
	jsm_dbg(OPEN, &channel->ch_bd->pci_dev,
		"jsm_open: initializing channel in open...\n");

	/*
	 * Flush input queues.
	 */
	channel->ch_r_head = channel->ch_r_tail = 0;
	channel->ch_e_head = channel->ch_e_tail = 0;

	brd->bd_ops->flush_uart_write(channel);
	brd->bd_ops->flush_uart_read(channel);

	channel->ch_flags = 0;
	channel->ch_cached_lsr = 0;
	channel->ch_stops_sent = 0;

	termios = &port->state->port.tty->termios;
	channel->ch_c_cflag = termios->c_cflag;
	channel->ch_c_iflag = termios->c_iflag;
	channel->ch_c_oflag = termios->c_oflag;
	channel->ch_c_lflag = termios->c_lflag;
	channel->ch_startc = termios->c_cc[VSTART];
	channel->ch_stopc = termios->c_cc[VSTOP];

	/* Tell UART to init itself */
	brd->bd_ops->uart_init(channel);

	/*
	 * Run param in case we changed anything
	 */
	brd->bd_ops->param(channel);

	jsm_carrier(channel);

	channel->ch_open_count++;

	jsm_dbg(OPEN, &channel->ch_bd->pci_dev, "finish\n");
	return 0;
}

static void jsm_tty_close(struct uart_port *port)
{
	struct jsm_board *bd;
	struct ktermios *ts;
	struct jsm_channel *channel = (struct jsm_channel *)port;

	jsm_dbg(CLOSE, &channel->ch_bd->pci_dev, "start\n");

	bd = channel->ch_bd;
	ts = &port->state->port.tty->termios;

	channel->ch_flags &= ~(CH_STOPI);

	channel->ch_open_count--;

	/*
	 * If we have HUPCL set, lower DTR and RTS
	 */
	if (channel->ch_c_cflag & HUPCL) {
		jsm_dbg(CLOSE, &channel->ch_bd->pci_dev,
			"Close. HUPCL set, dropping DTR/RTS\n");

		/* Drop RTS/DTR */
		channel->ch_mostat &= ~(UART_MCR_DTR | UART_MCR_RTS);
		bd->bd_ops->assert_modem_signals(channel);
	}

	/* Turn off UART interrupts for this port */
	channel->ch_bd->bd_ops->uart_off(channel);

	jsm_dbg(CLOSE, &channel->ch_bd->pci_dev, "finish\n");
}

static void jsm_tty_set_termios(struct uart_port *port,
				struct ktermios *termios,
				struct ktermios *old_termios)
{
	unsigned long lock_flags;
	struct jsm_channel *channel = (struct jsm_channel *)port;

	spin_lock_irqsave(&port->lock, lock_flags);
	channel->ch_c_cflag = termios->c_cflag;
	channel->ch_c_iflag = termios->c_iflag;
	channel->ch_c_oflag = termios->c_oflag;
	channel->ch_c_lflag = termios->c_lflag;
	channel->ch_startc = termios->c_cc[VSTART];
	channel->ch_stopc = termios->c_cc[VSTOP];

	channel->ch_bd->bd_ops->param(channel);
	jsm_carrier(channel);
	spin_unlock_irqrestore(&port->lock, lock_flags);
}

static const char *jsm_tty_type(struct uart_port *port)
{
	return "jsm";
}

static void jsm_tty_release_port(struct uart_port *port)
{
}

static int jsm_tty_request_port(struct uart_port *port)
{
	return 0;
}

static void jsm_config_port(struct uart_port *port, int flags)
{
	port->type = PORT_JSM;
}

static struct uart_ops jsm_ops = {
	.tx_empty	= jsm_tty_tx_empty,
	.set_mctrl	= jsm_tty_set_mctrl,
	.get_mctrl	= jsm_tty_get_mctrl,
	.stop_tx	= jsm_tty_stop_tx,
	.start_tx	= jsm_tty_start_tx,
	.send_xchar	= jsm_tty_send_xchar,
	.stop_rx	= jsm_tty_stop_rx,
	.enable_ms	= jsm_tty_enable_ms,
	.break_ctl	= jsm_tty_break,
	.startup	= jsm_tty_open,
	.shutdown	= jsm_tty_close,
	.set_termios	= jsm_tty_set_termios,
	.type		= jsm_tty_type,
	.release_port	= jsm_tty_release_port,
	.request_port	= jsm_tty_request_port,
	.config_port	= jsm_config_port,
};

/*
 * jsm_tty_init()
 *
 * Init the tty subsystem. Called once per board after board has been
 * downloaded and init'ed.
 */
int __devinit jsm_tty_init(struct jsm_board *brd)
{
	int i;
	void __iomem *vaddr;
	struct jsm_channel *ch;

	if (!brd)
		return -ENXIO;

	jsm_dbg(INIT, &brd->pci_dev, "start\n");

	/*
	 * Initialize board structure elements.
	 */

	brd->nasync = brd->maxports;

	/*
	 * Allocate channel memory that might not have been allocated
	 * when the driver was first loaded.
	 */
	for (i = 0; i < brd->nasync; i++) {
		if (!brd->channels[i]) {

			/*
			 * Okay to malloc with GFP_KERNEL, we are not at
			 * interrupt context, and there are no locks held.
			 */
			brd->channels[i] = kzalloc(sizeof(struct jsm_channel), GFP_KERNEL);
			if (!brd->channels[i]) {
				jsm_dbg(CORE, &brd->pci_dev,
					"%s:%d Unable to allocate memory for channel struct\n",
					__FILE__, __LINE__);
			}
		}
	}

	ch = brd->channels[0];
	vaddr = brd->re_map_membase;

	/* Set up channel variables */
	for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {

		if (!brd->channels[i])
			continue;

		spin_lock_init(&ch->ch_lock);

		if (brd->bd_uart_offset == 0x200)
			ch->ch_neo_uart = vaddr + (brd->bd_uart_offset * i);

		ch->ch_bd = brd;
		ch->ch_portnum = i;

		/* .25 second delay */
		ch->ch_close_delay = 250;

		init_waitqueue_head(&ch->ch_flags_wait);
	}

	jsm_dbg(INIT, &brd->pci_dev, "finish\n");
	return 0;
}

int jsm_uart_port_init(struct jsm_board *brd)
{
	int i, rc;
	unsigned int line;
	struct jsm_channel *ch;

	if (!brd)
		return -ENXIO;

	jsm_dbg(INIT, &brd->pci_dev, "start\n");

	/*
	 * Initialize board structure elements.
	 */

	brd->nasync = brd->maxports;

	/* Set up channel variables */
	for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {

		if (!brd->channels[i])
			continue;

		brd->channels[i]->uart_port.irq = brd->irq;
		brd->channels[i]->uart_port.uartclk = 14745600;
		brd->channels[i]->uart_port.type = PORT_JSM;
		brd->channels[i]->uart_port.iotype = UPIO_MEM;
		brd->channels[i]->uart_port.membase = brd->re_map_membase;
		brd->channels[i]->uart_port.fifosize = 16;
		brd->channels[i]->uart_port.ops = &jsm_ops;
		line = find_first_zero_bit(linemap, MAXLINES);
		if (line >= MAXLINES) {
			printk(KERN_INFO "jsm: linemap is full, adding device failed\n");
			continue;
		} else
			set_bit(line, linemap);
		brd->channels[i]->uart_port.line = line;
		rc = uart_add_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
		if (rc) {
			printk(KERN_INFO "jsm: Port %d failed. Aborting...\n", i);
			return rc;
		} else
			printk(KERN_INFO "jsm: Port %d added\n", i);
	}

	jsm_dbg(INIT, &brd->pci_dev, "finish\n");
	return 0;
}

int jsm_remove_uart_port(struct jsm_board *brd)
{
	int i;
	struct jsm_channel *ch;

	if (!brd)
		return -ENXIO;

	jsm_dbg(INIT, &brd->pci_dev, "start\n");

	/*
	 * Initialize board structure elements.
	 */

	brd->nasync = brd->maxports;

	/* Set up channel variables */
	for (i = 0; i < brd->nasync; i++) {

		if (!brd->channels[i])
			continue;

		ch = brd->channels[i];

		clear_bit(ch->uart_port.line, linemap);
		uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
	}

	jsm_dbg(INIT, &brd->pci_dev, "finish\n");
	return 0;
}

void jsm_input(struct jsm_channel *ch)
{
	struct jsm_board *bd;
	struct tty_struct *tp;
	u32 rmask;
	u16 head;
	u16 tail;
	int data_len;
	unsigned long lock_flags;
	int len = 0;
	int n = 0;
	int s = 0;
	int i = 0;

	/* Check the channel pointer before dereferencing it below */
	if (!ch)
		return;

	jsm_dbg(READ, &ch->ch_bd->pci_dev, "start\n");

	tp = ch->uart_port.state->port.tty;

	bd = ch->ch_bd;
	if (!bd)
		return;

	spin_lock_irqsave(&ch->ch_lock, lock_flags);

	/*
	 * Figure the number of characters in the buffer.
	 * Exit immediately if none.
	 */

	rmask = RQUEUEMASK;

	head = ch->ch_r_head & rmask;
	tail = ch->ch_r_tail & rmask;

	data_len = (head - tail) & rmask;
	if (data_len == 0) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		return;
	}

	jsm_dbg(READ, &ch->ch_bd->pci_dev, "start\n");

	/*
	 * If the device is not open, or CREAD is off, flush
	 * input data and return immediately.
	 */
	if (!tp || !(tp->termios.c_cflag & CREAD)) {

		jsm_dbg(READ, &ch->ch_bd->pci_dev,
			"input. dropping %d bytes on port %d...\n",
			data_len, ch->ch_portnum);
		ch->ch_r_head = tail;

		/* Force queue flow control to be released, if needed */
		jsm_check_queue_flow_control(ch);

		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		return;
	}

	/*
	 * If we are throttled, simply don't read any data.
	 */
	if (ch->ch_flags & CH_STOPI) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		jsm_dbg(READ, &ch->ch_bd->pci_dev,
			"Port %d throttled, not reading any data. head: %x tail: %x\n",
			ch->ch_portnum, head, tail);
		return;
	}

	jsm_dbg(READ, &ch->ch_bd->pci_dev, "start 2\n");

	if (data_len <= 0) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		jsm_dbg(READ, &ch->ch_bd->pci_dev, "jsm_input 1\n");
		return;
	}

	len = tty_buffer_request_room(tp, data_len);
	n = len;

	/*
	 * n now contains the most amount of data we can copy,
	 * bounded either by the flip buffer size or the amount
	 * of data the card actually has pending...
	 */
	while (n) {
		s = ((head >= tail) ? head : RQUEUESIZE) - tail;
		s = min(s, n);

		if (s <= 0)
			break;

		/*
		 * If conditions are such that ld needs to see all
		 * UART errors, we will have to walk each character
		 * and error byte and send them to the buffer one at
		 * a time.
		 */

		if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
			for (i = 0; i < s; i++) {
				/*
				 * Give the Linux ld the flags in the
				 * format it likes.
				 */
				if (*(ch->ch_equeue + tail + i) & UART_LSR_BI)
					tty_insert_flip_char(tp, *(ch->ch_rqueue + tail + i), TTY_BREAK);
				else if (*(ch->ch_equeue + tail + i) & UART_LSR_PE)
					tty_insert_flip_char(tp, *(ch->ch_rqueue + tail + i), TTY_PARITY);
				else if (*(ch->ch_equeue + tail + i) & UART_LSR_FE)
					tty_insert_flip_char(tp, *(ch->ch_rqueue + tail + i), TTY_FRAME);
				else
					tty_insert_flip_char(tp, *(ch->ch_rqueue + tail + i), TTY_NORMAL);
			}
		} else {
			tty_insert_flip_string(tp, ch->ch_rqueue + tail, s);
		}
		tail += s;
		n -= s;
		/* Flip queue if needed */
		tail &= rmask;
	}

	ch->ch_r_tail = tail & rmask;
	ch->ch_e_tail = tail & rmask;
	jsm_check_queue_flow_control(ch);
	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

	/* Tell the tty layer it's okay to "eat" the data now */
	tty_flip_buffer_push(tp);

	jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "finish\n");
}

static void jsm_carrier(struct jsm_channel *ch)
{
	struct jsm_board *bd;

	int virt_carrier = 0;
	int phys_carrier = 0;

	/* Check the channel pointer before dereferencing it below */
	if (!ch)
		return;

	jsm_dbg(CARR, &ch->ch_bd->pci_dev, "start\n");

	bd = ch->ch_bd;

	if (!bd)
		return;

	if (ch->ch_mistat & UART_MSR_DCD) {
		jsm_dbg(CARR, &ch->ch_bd->pci_dev, "mistat: %x D_CD: %x\n",
			ch->ch_mistat, ch->ch_mistat & UART_MSR_DCD);
		phys_carrier = 1;
	}

	if (ch->ch_c_cflag & CLOCAL)
		virt_carrier = 1;

	jsm_dbg(CARR, &ch->ch_bd->pci_dev, "DCD: physical: %d virt: %d\n",
		phys_carrier, virt_carrier);

	/*
	 * Test for a VIRTUAL carrier transition to HIGH.
	 */
	if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {

		/*
		 * When carrier rises, wake any threads waiting
		 * for carrier in the open routine.
		 */

		jsm_dbg(CARR, &ch->ch_bd->pci_dev, "carrier: virt DCD rose\n");

		if (waitqueue_active(&(ch->ch_flags_wait)))
			wake_up_interruptible(&ch->ch_flags_wait);
	}

	/*
	 * Test for a PHYSICAL carrier transition to HIGH.
	 */
	if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {

		/*
		 * When carrier rises, wake any threads waiting
		 * for carrier in the open routine.
		 */

		jsm_dbg(CARR, &ch->ch_bd->pci_dev,
			"carrier: physical DCD rose\n");

		if (waitqueue_active(&(ch->ch_flags_wait)))
			wake_up_interruptible(&ch->ch_flags_wait);
	}

	/*
	 * Test for a PHYSICAL transition to low, so long as we aren't
	 * currently ignoring physical transitions (which is what "virtual
	 * carrier" indicates).
	 *
	 * The transition of the virtual carrier to low really doesn't
	 * matter... it really only means "ignore carrier state", not
	 * "make pretend that carrier is there".
	 */
	if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0)
			&& (phys_carrier == 0)) {
		/*
		 * When carrier drops:
		 *
		 * Drop carrier on all open units.
		 *
		 * Flush queues, waking up any task waiting in the
		 * line discipline.
		 *
		 * Send a hangup to the control terminal.
		 *
		 * Enable all select calls.
		 */
		if (waitqueue_active(&(ch->ch_flags_wait)))
			wake_up_interruptible(&ch->ch_flags_wait);
	}

	/*
	 * Make sure that our cached values reflect the current reality.
	 */
	if (virt_carrier == 1)
		ch->ch_flags |= CH_FCAR;
	else
		ch->ch_flags &= ~CH_FCAR;

	if (phys_carrier == 1)
		ch->ch_flags |= CH_CD;
	else
		ch->ch_flags &= ~CH_CD;
}


void jsm_check_queue_flow_control(struct jsm_channel *ch)
{
	struct board_ops *bd_ops = ch->ch_bd->bd_ops;
	int qleft;

	/* Store how much space we have left in the queue */
	if ((qleft = ch->ch_r_tail - ch->ch_r_head - 1) < 0)
		qleft += RQUEUEMASK + 1;

	/*
	 * Check to see if we should enforce flow control on our queue because
	 * the ld (or user) isn't reading data out of our queue fast enough.
	 *
	 * NOTE: This is done based on what the current flow control of the
	 * port is set for.
	 *
	 * 1) HWFLOW (RTS) - Turn off the UART's Receive interrupt.
	 *	This will cause the UART's FIFO to back up, and force
	 *	the RTS signal to be dropped.
	 * 2) SWFLOW (IXOFF) - Keep trying to send a stop character to
	 *	the other side, in hopes it will stop sending data to us.
	 * 3) NONE - Nothing we can do. We will simply drop any extra data
	 *	that gets sent into us when the queue fills up.
	 */
	if (qleft < 256) {
		/* HWFLOW */
		if (ch->ch_c_cflag & CRTSCTS) {
			if (!(ch->ch_flags & CH_RECEIVER_OFF)) {
				bd_ops->disable_receiver(ch);
				ch->ch_flags |= (CH_RECEIVER_OFF);
				jsm_dbg(READ, &ch->ch_bd->pci_dev,
					"Internal queue hit hilevel mark (%d)! Turning off interrupts\n",
					qleft);
			}
		}
		/* SWFLOW */
		else if (ch->ch_c_iflag & IXOFF) {
			if (ch->ch_stops_sent <= MAX_STOPS_SENT) {
				bd_ops->send_stop_character(ch);
				ch->ch_stops_sent++;
				jsm_dbg(READ, &ch->ch_bd->pci_dev,
					"Sending stop char! Times sent: %x\n",
					ch->ch_stops_sent);
			}
		}
	}

	/*
	 * Check to see if we should stop enforcing flow control because
	 * the ld (or user) finally read enough data out of our queue.
	 *
	 * NOTE: This is done based on what the current flow control of the
	 * port is set for.
	 *
	 * 1) HWFLOW (RTS) - Turn back on the UART's Receive interrupt.
	 *	This will cause the UART's FIFO to raise RTS back up,
	 *	which will allow the other side to start sending data again.
	 * 2) SWFLOW (IXOFF) - Send a start character to
	 *	the other side, so it will start sending data to us again.
	 * 3) NONE - Do nothing. Since we didn't do anything to turn off the
	 *	other side, we don't need to do anything now.
	 */
	if (qleft > (RQUEUESIZE / 2)) {
		/* HWFLOW */
		if (ch->ch_c_cflag & CRTSCTS) {
			if (ch->ch_flags & CH_RECEIVER_OFF) {
				bd_ops->enable_receiver(ch);
				ch->ch_flags &= ~(CH_RECEIVER_OFF);
				jsm_dbg(READ, &ch->ch_bd->pci_dev,
					"Internal queue hit lowlevel mark (%d)! Turning on interrupts\n",
					qleft);
			}
		}
		/* SWFLOW */
		else if (ch->ch_c_iflag & IXOFF && ch->ch_stops_sent) {
			ch->ch_stops_sent = 0;
			bd_ops->send_start_character(ch);
			jsm_dbg(READ, &ch->ch_bd->pci_dev, "Sending start char!\n");
		}
	}
}