/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
 * Licensed under the GPL
 */
6 #include <linux/slab.h>
8 #include <linux/tty_flip.h>
13 #ifdef CONFIG_NOCONFIG_CHAN
14 static void *not_configged_init(char *str
, int device
,
15 const struct chan_opts
*opts
)
17 printk(KERN_ERR
"Using a channel type which is configured out of "
22 static int not_configged_open(int input
, int output
, int primary
, void *data
,
25 printk(KERN_ERR
"Using a channel type which is configured out of "
30 static void not_configged_close(int fd
, void *data
)
32 printk(KERN_ERR
"Using a channel type which is configured out of "
36 static int not_configged_read(int fd
, char *c_out
, void *data
)
38 printk(KERN_ERR
"Using a channel type which is configured out of "
43 static int not_configged_write(int fd
, const char *buf
, int len
, void *data
)
45 printk(KERN_ERR
"Using a channel type which is configured out of "
50 static int not_configged_console_write(int fd
, const char *buf
, int len
)
52 printk(KERN_ERR
"Using a channel type which is configured out of "
57 static int not_configged_window_size(int fd
, void *data
, unsigned short *rows
,
60 printk(KERN_ERR
"Using a channel type which is configured out of "
65 static void not_configged_free(void *data
)
67 printk(KERN_ERR
"Using a channel type which is configured out of "
71 static const struct chan_ops not_configged_ops
= {
72 .init
= not_configged_init
,
73 .open
= not_configged_open
,
74 .close
= not_configged_close
,
75 .read
= not_configged_read
,
76 .write
= not_configged_write
,
77 .console_write
= not_configged_console_write
,
78 .window_size
= not_configged_window_size
,
79 .free
= not_configged_free
,
82 #endif /* CONFIG_NOCONFIG_CHAN */
84 static void tty_receive_char(struct tty_struct
*tty
, char ch
)
89 if (I_IXON(tty
) && !I_IXOFF(tty
) && !tty
->raw
) {
90 if (ch
== STOP_CHAR(tty
)) {
94 else if (ch
== START_CHAR(tty
)) {
100 tty_insert_flip_char(tty
, ch
, TTY_NORMAL
);
103 static int open_one_chan(struct chan
*chan
)
110 if (chan
->ops
->open
== NULL
)
112 else fd
= (*chan
->ops
->open
)(chan
->input
, chan
->output
, chan
->primary
,
113 chan
->data
, &chan
->dev
);
117 err
= os_set_fd_block(fd
, 0);
119 (*chan
->ops
->close
)(fd
, chan
->data
);
129 static int open_chan(struct list_head
*chans
)
131 struct list_head
*ele
;
135 list_for_each(ele
, chans
) {
136 chan
= list_entry(ele
, struct chan
, list
);
137 ret
= open_one_chan(chan
);
144 void chan_enable_winch(struct chan
*chan
, struct tty_struct
*tty
)
146 if (chan
&& chan
->primary
&& chan
->ops
->winch
)
147 register_winch(chan
->fd
, tty
);
150 static void line_timer_cb(struct work_struct
*work
)
152 struct line
*line
= container_of(work
, struct line
, task
.work
);
154 if (!line
->throttled
)
155 chan_interrupt(line
, line
->tty
, line
->driver
->read_irq
);
158 int enable_chan(struct line
*line
)
160 struct list_head
*ele
;
164 INIT_DELAYED_WORK(&line
->task
, line_timer_cb
);
166 list_for_each(ele
, &line
->chan_list
) {
167 chan
= list_entry(ele
, struct chan
, list
);
168 err
= open_one_chan(chan
);
178 err
= line_setup_irq(chan
->fd
, chan
->input
, chan
->output
, line
,
193 /* Items are added in IRQ context, when free_irq can't be called, and
194 * removed in process context, when it can.
195 * This handles interrupt sources which disappear, and which need to
196 * be permanently disabled. This is discovered in IRQ context, but
197 * the freeing of the IRQ must be done later.
199 static DEFINE_SPINLOCK(irqs_to_free_lock
);
200 static LIST_HEAD(irqs_to_free
);
206 struct list_head
*ele
;
209 spin_lock_irqsave(&irqs_to_free_lock
, flags
);
210 list_splice_init(&irqs_to_free
, &list
);
211 spin_unlock_irqrestore(&irqs_to_free_lock
, flags
);
213 list_for_each(ele
, &list
) {
214 chan
= list_entry(ele
, struct chan
, free_list
);
216 if (chan
->input
&& chan
->enabled
)
217 um_free_irq(chan
->line
->driver
->read_irq
, chan
);
218 if (chan
->output
&& chan
->enabled
)
219 um_free_irq(chan
->line
->driver
->write_irq
, chan
);
224 static void close_one_chan(struct chan
*chan
, int delay_free_irq
)
231 if (delay_free_irq
) {
232 spin_lock_irqsave(&irqs_to_free_lock
, flags
);
233 list_add(&chan
->free_list
, &irqs_to_free
);
234 spin_unlock_irqrestore(&irqs_to_free_lock
, flags
);
237 if (chan
->input
&& chan
->enabled
)
238 um_free_irq(chan
->line
->driver
->read_irq
, chan
);
239 if (chan
->output
&& chan
->enabled
)
240 um_free_irq(chan
->line
->driver
->write_irq
, chan
);
243 if (chan
->ops
->close
!= NULL
)
244 (*chan
->ops
->close
)(chan
->fd
, chan
->data
);
250 void close_chan(struct line
*line
)
254 /* Close in reverse order as open in case more than one of them
255 * refers to the same device and they save and restore that device's
256 * state. Then, the first one opened will have the original state,
257 * so it must be the last closed.
259 list_for_each_entry_reverse(chan
, &line
->chan_list
, list
) {
260 close_one_chan(chan
, 0);
264 void deactivate_chan(struct chan
*chan
, int irq
)
266 if (chan
&& chan
->enabled
)
267 deactivate_fd(chan
->fd
, irq
);
270 void reactivate_chan(struct chan
*chan
, int irq
)
272 if (chan
&& chan
->enabled
)
273 reactivate_fd(chan
->fd
, irq
);
276 int write_chan(struct chan
*chan
, const char *buf
, int len
,
281 if (len
== 0 || !chan
|| !chan
->ops
->write
)
284 n
= chan
->ops
->write(chan
->fd
, buf
, len
, chan
->data
);
287 if ((ret
== -EAGAIN
) || ((ret
>= 0) && (ret
< len
)))
288 reactivate_fd(chan
->fd
, write_irq
);
293 int console_write_chan(struct chan
*chan
, const char *buf
, int len
)
297 if (!chan
|| !chan
->ops
->console_write
)
300 n
= chan
->ops
->console_write(chan
->fd
, buf
, len
);
306 int console_open_chan(struct line
*line
, struct console
*co
)
310 err
= open_chan(&line
->chan_list
);
314 printk(KERN_INFO
"Console initialized on /dev/%s%d\n", co
->name
,
319 int chan_window_size(struct line
*line
, unsigned short *rows_out
,
320 unsigned short *cols_out
)
324 chan
= line
->chan_in
;
325 if (chan
&& chan
->primary
) {
326 if (chan
->ops
->window_size
== NULL
)
328 return chan
->ops
->window_size(chan
->fd
, chan
->data
,
331 chan
= line
->chan_out
;
332 if (chan
&& chan
->primary
) {
333 if (chan
->ops
->window_size
== NULL
)
335 return chan
->ops
->window_size(chan
->fd
, chan
->data
,
341 static void free_one_chan(struct chan
*chan
)
343 list_del(&chan
->list
);
345 close_one_chan(chan
, 0);
347 if (chan
->ops
->free
!= NULL
)
348 (*chan
->ops
->free
)(chan
->data
);
350 if (chan
->primary
&& chan
->output
)
351 ignore_sigio_fd(chan
->fd
);
355 static void free_chan(struct list_head
*chans
)
357 struct list_head
*ele
, *next
;
360 list_for_each_safe(ele
, next
, chans
) {
361 chan
= list_entry(ele
, struct chan
, list
);
366 static int one_chan_config_string(struct chan
*chan
, char *str
, int size
,
372 CONFIG_CHUNK(str
, size
, n
, "none", 1);
376 CONFIG_CHUNK(str
, size
, n
, chan
->ops
->type
, 0);
378 if (chan
->dev
== NULL
) {
379 CONFIG_CHUNK(str
, size
, n
, "", 1);
383 CONFIG_CHUNK(str
, size
, n
, ":", 0);
384 CONFIG_CHUNK(str
, size
, n
, chan
->dev
, 0);
/*
 * Render an input/output channel pair as a config string.  A shared
 * channel is rendered once; distinct channels are joined with ",".
 * Returns the number of bytes produced.
 * NOTE(review): the str/size advancement between chunks was reconstructed
 * from a garbled extraction -- confirm against upstream.
 */
static int chan_pair_config_string(struct chan *in, struct chan *out,
				   char *str, int size, char **error_out)
{
	int n;

	n = one_chan_config_string(in, str, size, error_out);
	str += n;
	size -= n;

	if (in == out) {
		CONFIG_CHUNK(str, size, n, "", 1);
		return n;
	}

	CONFIG_CHUNK(str, size, n, ",", 1);
	n = one_chan_config_string(out, str, size, error_out);
	str += n;
	size -= n;
	CONFIG_CHUNK(str, size, n, "", 1);

	return n;
}
412 int chan_config_string(struct line
*line
, char *str
, int size
,
415 struct chan
*in
= line
->chan_in
, *out
= line
->chan_out
;
417 if (in
&& !in
->primary
)
419 if (out
&& !out
->primary
)
422 return chan_pair_config_string(in
, out
, str
, size
, error_out
);
427 const struct chan_ops
*ops
;
430 static const struct chan_type chan_table
[] = {
433 #ifdef CONFIG_NULL_CHAN
434 { "null", &null_ops
},
436 { "null", ¬_configged_ops
},
439 #ifdef CONFIG_PORT_CHAN
440 { "port", &port_ops
},
442 { "port", ¬_configged_ops
},
445 #ifdef CONFIG_PTY_CHAN
449 { "pty", ¬_configged_ops
},
450 { "pts", ¬_configged_ops
},
453 #ifdef CONFIG_TTY_CHAN
456 { "tty", ¬_configged_ops
},
459 #ifdef CONFIG_XTERM_CHAN
460 { "xterm", &xterm_ops
},
462 { "xterm", ¬_configged_ops
},
466 static struct chan
*parse_chan(struct line
*line
, char *str
, int device
,
467 const struct chan_opts
*opts
, char **error_out
)
469 const struct chan_type
*entry
;
470 const struct chan_ops
*ops
;
477 for(i
= 0; i
< ARRAY_SIZE(chan_table
); i
++) {
478 entry
= &chan_table
[i
];
479 if (!strncmp(str
, entry
->key
, strlen(entry
->key
))) {
481 str
+= strlen(entry
->key
);
486 *error_out
= "No match for configured backends";
490 data
= (*ops
->init
)(str
, device
, opts
);
492 *error_out
= "Configuration failed";
496 chan
= kmalloc(sizeof(*chan
), GFP_ATOMIC
);
498 *error_out
= "Memory allocation failed";
501 *chan
= ((struct chan
) { .list
= LIST_HEAD_INIT(chan
->list
),
503 LIST_HEAD_INIT(chan
->free_list
),
516 int parse_chan_pair(char *str
, struct line
*line
, int device
,
517 const struct chan_opts
*opts
, char **error_out
)
519 struct list_head
*chans
= &line
->chan_list
;
523 if (!list_empty(chans
)) {
524 line
->chan_in
= line
->chan_out
= NULL
;
526 INIT_LIST_HEAD(chans
);
532 out
= strchr(str
, ',');
537 new = parse_chan(line
, in
, device
, opts
, error_out
);
542 list_add(&new->list
, chans
);
545 new = parse_chan(line
, out
, device
, opts
, error_out
);
549 list_add(&new->list
, chans
);
551 line
->chan_out
= new;
554 new = parse_chan(line
, str
, device
, opts
, error_out
);
558 list_add(&new->list
, chans
);
561 line
->chan_in
= line
->chan_out
= new;
566 void chan_interrupt(struct line
*line
, struct tty_struct
*tty
, int irq
)
568 struct chan
*chan
= line
->chan_in
;
572 if (!chan
|| !chan
->ops
->read
)
576 if (tty
&& !tty_buffer_request_room(tty
, 1)) {
577 schedule_delayed_work(&line
->task
, 1);
580 err
= chan
->ops
->read(chan
->fd
, &c
, chan
->data
);
582 tty_receive_char(tty
, c
);
586 reactivate_fd(chan
->fd
, irq
);
591 if (line
->chan_out
!= chan
)
592 close_one_chan(line
->chan_out
, 1);
594 close_one_chan(chan
, 1);
600 tty_flip_buffer_push(tty
);