drivers/s390/char/sclp_vt220.c
1 /*
2 * SCLP VT220 terminal driver.
3 *
4 * Copyright IBM Corp. 2003, 2009
5 *
6 * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
7 */
8
9 #include <linux/module.h>
10 #include <linux/spinlock.h>
11 #include <linux/list.h>
12 #include <linux/wait.h>
13 #include <linux/timer.h>
14 #include <linux/kernel.h>
15 #include <linux/tty.h>
16 #include <linux/tty_driver.h>
17 #include <linux/tty_flip.h>
18 #include <linux/errno.h>
19 #include <linux/mm.h>
20 #include <linux/major.h>
21 #include <linux/console.h>
22 #include <linux/kdev_t.h>
23 #include <linux/interrupt.h>
24 #include <linux/init.h>
25 #include <linux/reboot.h>
26 #include <linux/slab.h>
27
28 #include <asm/uaccess.h>
29 #include "sclp.h"
30
31 #define SCLP_VT220_MAJOR TTY_MAJOR
32 #define SCLP_VT220_MINOR 65
33 #define SCLP_VT220_DRIVER_NAME "sclp_vt220"
34 #define SCLP_VT220_DEVICE_NAME "ttysclp"
35 #define SCLP_VT220_CONSOLE_NAME "ttyS"
36 #define SCLP_VT220_CONSOLE_INDEX 1 /* console=ttyS1 */
37
38 /* Representation of a single write request */
39 struct sclp_vt220_request {
40 struct list_head list;
41 struct sclp_req sclp_req;
42 int retry_count;
43 };
44
45 /* VT220 SCCB */
46 struct sclp_vt220_sccb {
47 struct sccb_header header;
48 struct evbuf_header evbuf;
49 };
50
51 #define SCLP_VT220_MAX_CHARS_PER_BUFFER (PAGE_SIZE - \
52 sizeof(struct sclp_vt220_request) - \
53 sizeof(struct sclp_vt220_sccb))
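/*
 * Each write request occupies one page: the SCCB (header, event buffer and
 * message text) starts at the beginning of the page, while the
 * struct sclp_vt220_request bookkeeping data is placed at the very end of
 * the same page (see sclp_vt220_initialize_page()). The macro above is the
 * message text capacity that remains in between.
 */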
54
55 /* Structures and data needed to register tty driver */
56 static struct tty_driver *sclp_vt220_driver;
57
58 static struct tty_port sclp_vt220_port;
59
60 /* Lock to protect internal data from concurrent access */
61 static spinlock_t sclp_vt220_lock;
62
63 /* List of empty pages to be used as write request buffers */
64 static struct list_head sclp_vt220_empty;
65
66 /* List of pending requests */
67 static struct list_head sclp_vt220_outqueue;
68
69 /* Suspend mode flag */
70 static int sclp_vt220_suspended;
71
72 /* Flag that output queue is currently running */
73 static int sclp_vt220_queue_running;
74
75 /* Timer used for delaying write requests to merge subsequent messages into
76 * a single buffer */
77 static struct timer_list sclp_vt220_timer;
78
79 /* Pointer to current request buffer which has been partially filled but not
80 * yet sent */
81 static struct sclp_vt220_request *sclp_vt220_current_request;
82
83 /* Number of characters in current request buffer */
84 static int sclp_vt220_buffered_chars;
85
86 /* Counter controlling core driver initialization. */
87 static int __initdata sclp_vt220_init_count;
88
89 /* Flag indicating that sclp_vt220_current_request should already have
90  * been queued but could not be because the SCLP was busy processing
91  * another buffer */
92 static int sclp_vt220_flush_later;
93
94 static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
95 static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
96 enum sclp_pm_event sclp_pm_event);
97 static int __sclp_vt220_emit(struct sclp_vt220_request *request);
98 static void sclp_vt220_emit_current(void);
99
100 /* Registration structure for SCLP output event buffers */
101 static struct sclp_register sclp_vt220_register = {
102 .send_mask = EVTYP_VT220MSG_MASK,
103 .pm_event_fn = sclp_vt220_pm_event_fn,
104 };
105
106 /* Registration structure for SCLP input event buffers */
107 static struct sclp_register sclp_vt220_register_input = {
108 .receive_mask = EVTYP_VT220MSG_MASK,
109 .receiver_fn = sclp_vt220_receiver_fn,
110 };
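/*
 * Note: the output (send) event type is registered by __sclp_vt220_init(),
 * which runs for both the console and the tty set-up, while the input
 * (receive) registration above is only performed later in
 * sclp_vt220_tty_init().
 */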
111
112
113 /*
114 * Put provided request buffer back into queue and check emit pending
115 * buffers if necessary.
116 */
117 static void
118 sclp_vt220_process_queue(struct sclp_vt220_request *request)
119 {
120 unsigned long flags;
121 void *page;
122
123 do {
124 /* Put buffer back to list of empty buffers */
125 page = request->sclp_req.sccb;
126 spin_lock_irqsave(&sclp_vt220_lock, flags);
127 /* Move request from outqueue to empty queue */
128 list_del(&request->list);
129 list_add_tail((struct list_head *) page, &sclp_vt220_empty);
130 /* Check if there is a pending buffer on the out queue. */
131 request = NULL;
132 if (!list_empty(&sclp_vt220_outqueue))
133 request = list_entry(sclp_vt220_outqueue.next,
134 struct sclp_vt220_request, list);
135 if (!request || sclp_vt220_suspended) {
136 sclp_vt220_queue_running = 0;
137 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
138 break;
139 }
140 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
141 } while (__sclp_vt220_emit(request));
142 if (request == NULL && sclp_vt220_flush_later)
143 sclp_vt220_emit_current();
144 tty_port_tty_wakeup(&sclp_vt220_port);
145 }
146
147 #define SCLP_BUFFER_MAX_RETRY		1	/* retry a request at most once on equipment check */
148
149 /*
150 * Callback through which the result of a write request is reported by the
151 * SCLP.
152 */
153 static void
154 sclp_vt220_callback(struct sclp_req *request, void *data)
155 {
156 struct sclp_vt220_request *vt220_request;
157 struct sclp_vt220_sccb *sccb;
158
159 vt220_request = (struct sclp_vt220_request *) data;
160 if (request->status == SCLP_REQ_FAILED) {
161 sclp_vt220_process_queue(vt220_request);
162 return;
163 }
164 sccb = (struct sclp_vt220_sccb *) vt220_request->sclp_req.sccb;
165
166 /* Check SCLP response code and choose suitable action */
167 switch (sccb->header.response_code) {
168 	case 0x0020:		/* Normal completion, buffer processed */
169 break;
170
171 case 0x05f0: /* Target resource in improper state */
172 break;
173
174 case 0x0340: /* Contained SCLP equipment check */
175 if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
176 break;
177 /* Remove processed buffers and requeue rest */
178 if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
179 /* Not all buffers were processed */
180 sccb->header.response_code = 0x0000;
181 vt220_request->sclp_req.status = SCLP_REQ_FILLED;
182 if (sclp_add_request(request) == 0)
183 return;
184 }
185 break;
186
187 case 0x0040: /* SCLP equipment check */
188 if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
189 break;
190 sccb->header.response_code = 0x0000;
191 vt220_request->sclp_req.status = SCLP_REQ_FILLED;
192 if (sclp_add_request(request) == 0)
193 return;
194 break;
195
196 default:
197 break;
198 }
199 sclp_vt220_process_queue(vt220_request);
200 }
201
202 /*
203 * Emit vt220 request buffer to SCLP. Return zero on success, non-zero
204 * otherwise.
205 */
206 static int
207 __sclp_vt220_emit(struct sclp_vt220_request *request)
208 {
209 if (!(sclp_vt220_register.sclp_receive_mask & EVTYP_VT220MSG_MASK)) {
210 request->sclp_req.status = SCLP_REQ_FAILED;
211 return -EIO;
212 }
213 request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
214 request->sclp_req.status = SCLP_REQ_FILLED;
215 request->sclp_req.callback = sclp_vt220_callback;
216 request->sclp_req.callback_data = (void *) request;
217
218 return sclp_add_request(&request->sclp_req);
219 }
220
221 /*
222 * Queue and emit current request.
223 */
224 static void
225 sclp_vt220_emit_current(void)
226 {
227 unsigned long flags;
228 struct sclp_vt220_request *request;
229 struct sclp_vt220_sccb *sccb;
230
231 spin_lock_irqsave(&sclp_vt220_lock, flags);
232 if (sclp_vt220_current_request) {
233 sccb = (struct sclp_vt220_sccb *)
234 sclp_vt220_current_request->sclp_req.sccb;
235 /* Only emit buffers with content */
236 if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
237 list_add_tail(&sclp_vt220_current_request->list,
238 &sclp_vt220_outqueue);
239 sclp_vt220_current_request = NULL;
240 if (timer_pending(&sclp_vt220_timer))
241 del_timer(&sclp_vt220_timer);
242 }
243 sclp_vt220_flush_later = 0;
244 }
245 if (sclp_vt220_queue_running || sclp_vt220_suspended)
246 goto out_unlock;
247 if (list_empty(&sclp_vt220_outqueue))
248 goto out_unlock;
249 request = list_first_entry(&sclp_vt220_outqueue,
250 struct sclp_vt220_request, list);
251 sclp_vt220_queue_running = 1;
252 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
253
254 if (__sclp_vt220_emit(request))
255 sclp_vt220_process_queue(request);
256 return;
257 out_unlock:
258 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
259 }
260
261 #define SCLP_NORMAL_WRITE 0x00
262
263 /*
264 * Helper function to initialize a page with the sclp request structure.
265 */
266 static struct sclp_vt220_request *
267 sclp_vt220_initialize_page(void *page)
268 {
269 struct sclp_vt220_request *request;
270 struct sclp_vt220_sccb *sccb;
271
272 /* Place request structure at end of page */
273 request = ((struct sclp_vt220_request *)
274 ((addr_t) page + PAGE_SIZE)) - 1;
275 request->retry_count = 0;
276 request->sclp_req.sccb = page;
277 /* SCCB goes at start of page */
278 sccb = (struct sclp_vt220_sccb *) page;
279 memset((void *) sccb, 0, sizeof(struct sclp_vt220_sccb));
280 sccb->header.length = sizeof(struct sclp_vt220_sccb);
281 sccb->header.function_code = SCLP_NORMAL_WRITE;
282 sccb->header.response_code = 0x0000;
283 sccb->evbuf.type = EVTYP_VT220MSG;
284 sccb->evbuf.length = sizeof(struct evbuf_header);
285
286 return request;
287 }
288
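/* Return the number of message bytes that still fit into the request's page. */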
289 static inline unsigned int
290 sclp_vt220_space_left(struct sclp_vt220_request *request)
291 {
292 struct sclp_vt220_sccb *sccb;
293 sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
294 return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
295 sccb->header.length;
296 }
297
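/* Return the number of message bytes currently stored in the request's buffer. */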
298 static inline unsigned int
299 sclp_vt220_chars_stored(struct sclp_vt220_request *request)
300 {
301 struct sclp_vt220_sccb *sccb;
302 sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
303 return sccb->evbuf.length - sizeof(struct evbuf_header);
304 }
305
306 /*
307  * Add msg to the buffer associated with request. Return the number of characters
308  * from msg that were consumed; this may be less than count if the buffer runs full.
309 */
310 static int
311 sclp_vt220_add_msg(struct sclp_vt220_request *request,
312 const unsigned char *msg, int count, int convertlf)
313 {
314 struct sclp_vt220_sccb *sccb;
315 void *buffer;
316 unsigned char c;
317 int from;
318 int to;
319
320 if (count > sclp_vt220_space_left(request))
321 count = sclp_vt220_space_left(request);
322 if (count <= 0)
323 return 0;
324
325 sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
326 buffer = (void *) ((addr_t) sccb + sccb->header.length);
327
328 if (convertlf) {
329 /* Perform Linefeed conversion (0x0a -> 0x0a 0x0d)*/
330 for (from=0, to=0;
331 (from < count) && (to < sclp_vt220_space_left(request));
332 from++) {
333 /* Retrieve character */
334 c = msg[from];
335 /* Perform conversion */
336 if (c == 0x0a) {
337 if (to + 1 < sclp_vt220_space_left(request)) {
338 ((unsigned char *) buffer)[to++] = c;
339 ((unsigned char *) buffer)[to++] = 0x0d;
340 } else
341 break;
342
343 } else
344 ((unsigned char *) buffer)[to++] = c;
345 }
346 sccb->header.length += to;
347 sccb->evbuf.length += to;
348 return from;
349 } else {
350 memcpy(buffer, (const void *) msg, count);
351 sccb->header.length += count;
352 sccb->evbuf.length += count;
353 return count;
354 }
355 }
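/*
 * Example of the convertlf handling above: the three input characters
 * "a\nb" are stored as the four bytes "a\n\rb". When there is not enough
 * room left for both bytes of the pair, the "\n" is not stored at all, so
 * the "\n\r" sequence is never split across two buffers.
 */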
356
357 /*
358 * Emit buffer after having waited long enough for more data to arrive.
359 */
360 static void
361 sclp_vt220_timeout(unsigned long data)
362 {
363 sclp_vt220_emit_current();
364 }
365
366 #define BUFFER_MAX_DELAY	HZ/20	/* wait up to 50 ms for more output */
367
368 /*
369  * Drop the oldest queued console buffer if sclp_console_drop is set
370 */
371 static int
372 sclp_vt220_drop_buffer(void)
373 {
374 struct list_head *list;
375 struct sclp_vt220_request *request;
376 void *page;
377
378 if (!sclp_console_drop)
379 return 0;
380 list = sclp_vt220_outqueue.next;
381 if (sclp_vt220_queue_running)
382 /* The first element is in I/O */
383 list = list->next;
384 if (list == &sclp_vt220_outqueue)
385 return 0;
386 list_del(list);
387 request = list_entry(list, struct sclp_vt220_request, list);
388 page = request->sclp_req.sccb;
389 list_add_tail((struct list_head *) page, &sclp_vt220_empty);
390 return 1;
391 }
392
393 /*
394  * Internal implementation of the write function. Write COUNT bytes of data
395  * from memory at BUF to the SCLP interface. In case the data does not fit
396  * into the current write buffer, emit the current one and allocate a new
397  * one. If there are no more empty buffers available, wait until one gets
398  * emptied - or, if MAY_FAIL is non-zero, give up and return the number of
399  * bytes written so far. If DO_SCHEDULE is non-zero, the buffer will be
400  * scheduled for emitting after a timeout - otherwise the caller has to
401  * explicitly call the flush function.
402  * A non-zero CONVERTLF parameter indicates that 0x0a characters in the message
403  * should be converted to 0x0a 0x0d. After completion, return the number of bytes written.
404 */
405 static int
406 __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
407 int convertlf, int may_fail)
408 {
409 unsigned long flags;
410 void *page;
411 int written;
412 int overall_written;
413
414 if (count <= 0)
415 return 0;
416 overall_written = 0;
417 spin_lock_irqsave(&sclp_vt220_lock, flags);
418 do {
419 /* Create an sclp output buffer if none exists yet */
420 if (sclp_vt220_current_request == NULL) {
421 if (list_empty(&sclp_vt220_empty))
422 sclp_console_full++;
423 while (list_empty(&sclp_vt220_empty)) {
424 if (may_fail || sclp_vt220_suspended)
425 goto out;
426 if (sclp_vt220_drop_buffer())
427 break;
428 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
429
430 sclp_sync_wait();
431 spin_lock_irqsave(&sclp_vt220_lock, flags);
432 }
433 page = (void *) sclp_vt220_empty.next;
434 list_del((struct list_head *) page);
435 sclp_vt220_current_request =
436 sclp_vt220_initialize_page(page);
437 }
438 /* Try to write the string to the current request buffer */
439 written = sclp_vt220_add_msg(sclp_vt220_current_request,
440 buf, count, convertlf);
441 overall_written += written;
442 if (written == count)
443 break;
444 /*
445 * Not all characters could be written to the current
446 * output buffer. Emit the buffer, create a new buffer
447 * and then output the rest of the string.
448 */
449 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
450 sclp_vt220_emit_current();
451 spin_lock_irqsave(&sclp_vt220_lock, flags);
452 buf += written;
453 count -= written;
454 } while (count > 0);
455 /* Setup timer to output current console buffer after some time */
456 if (sclp_vt220_current_request != NULL &&
457 !timer_pending(&sclp_vt220_timer) && do_schedule) {
458 sclp_vt220_timer.function = sclp_vt220_timeout;
459 sclp_vt220_timer.data = 0UL;
460 sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
461 add_timer(&sclp_vt220_timer);
462 }
463 out:
464 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
465 return overall_written;
466 }
467
468 /*
469 * This routine is called by the kernel to write a series of
470 * characters to the tty device. The characters may come from
471 * user space or kernel space. This routine will return the
472 * number of characters actually accepted for writing.
473 */
474 static int
475 sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
476 {
477 return __sclp_vt220_write(buf, count, 1, 0, 1);
478 }
479
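/* First data byte of an incoming VT220 event buffer identifies the message type. */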
480 #define SCLP_VT220_SESSION_ENDED 0x01
481 #define SCLP_VT220_SESSION_STARTED 0x80
482 #define SCLP_VT220_SESSION_DATA 0x00
483
484 /*
485 * Called by the SCLP to report incoming event buffers.
486 */
487 static void
488 sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
489 {
490 char *buffer;
491 unsigned int count;
492
493 buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
494 count = evbuf->length - sizeof(struct evbuf_header);
495
496 switch (*buffer) {
497 case SCLP_VT220_SESSION_ENDED:
498 case SCLP_VT220_SESSION_STARTED:
499 break;
500 case SCLP_VT220_SESSION_DATA:
501 /* Send input to line discipline */
502 buffer++;
503 count--;
504 tty_insert_flip_string(&sclp_vt220_port, buffer, count);
505 tty_flip_buffer_push(&sclp_vt220_port);
506 break;
507 }
508 }
509
510 /*
511 * This routine is called when a particular tty device is opened.
512 */
513 static int
514 sclp_vt220_open(struct tty_struct *tty, struct file *filp)
515 {
516 if (tty->count == 1) {
517 tty_port_tty_set(&sclp_vt220_port, tty);
518 sclp_vt220_port.low_latency = 0;
519 if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
520 tty->winsize.ws_row = 24;
521 tty->winsize.ws_col = 80;
522 }
523 }
524 return 0;
525 }
526
527 /*
528 * This routine is called when a particular tty device is closed.
529 */
530 static void
531 sclp_vt220_close(struct tty_struct *tty, struct file *filp)
532 {
533 if (tty->count == 1)
534 tty_port_tty_set(&sclp_vt220_port, NULL);
535 }
536
537 /*
538 * This routine is called by the kernel to write a single
539 * character to the tty device. If the kernel uses this routine,
540 * it must call the flush_chars() routine (if defined) when it is
541 * done stuffing characters into the driver.
542 */
543 static int
544 sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
545 {
546 return __sclp_vt220_write(&ch, 1, 0, 0, 1);
547 }
548
549 /*
550 * This routine is called by the kernel after it has written a
551 * series of characters to the tty device using put_char().
552 */
553 static void
554 sclp_vt220_flush_chars(struct tty_struct *tty)
555 {
556 if (!sclp_vt220_queue_running)
557 sclp_vt220_emit_current();
558 else
559 sclp_vt220_flush_later = 1;
560 }
561
562 /*
563 * This routine returns the numbers of characters the tty driver
564 * will accept for queuing to be written. This number is subject
565 * to change as output buffers get emptied, or if the output flow
566  * control is asserted.
567 */
568 static int
569 sclp_vt220_write_room(struct tty_struct *tty)
570 {
571 unsigned long flags;
572 struct list_head *l;
573 int count;
574
575 spin_lock_irqsave(&sclp_vt220_lock, flags);
576 count = 0;
577 if (sclp_vt220_current_request != NULL)
578 count = sclp_vt220_space_left(sclp_vt220_current_request);
579 list_for_each(l, &sclp_vt220_empty)
580 count += SCLP_VT220_MAX_CHARS_PER_BUFFER;
581 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
582 return count;
583 }
584
585 /*
586 * Return number of buffered chars.
587 */
588 static int
589 sclp_vt220_chars_in_buffer(struct tty_struct *tty)
590 {
591 unsigned long flags;
592 struct list_head *l;
593 struct sclp_vt220_request *r;
594 int count;
595
596 spin_lock_irqsave(&sclp_vt220_lock, flags);
597 count = 0;
598 if (sclp_vt220_current_request != NULL)
599 count = sclp_vt220_chars_stored(sclp_vt220_current_request);
600 list_for_each(l, &sclp_vt220_outqueue) {
601 r = list_entry(l, struct sclp_vt220_request, list);
602 count += sclp_vt220_chars_stored(r);
603 }
604 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
605 return count;
606 }
607
608 /*
609 * Pass on all buffers to the hardware. Return only when there are no more
610 * buffers pending.
611 */
612 static void
613 sclp_vt220_flush_buffer(struct tty_struct *tty)
614 {
615 sclp_vt220_emit_current();
616 }
617
618 /* Release allocated pages. */
619 static void __init __sclp_vt220_free_pages(void)
620 {
621 struct list_head *page, *p;
622
623 list_for_each_safe(page, p, &sclp_vt220_empty) {
624 list_del(page);
625 free_page((unsigned long) page);
626 }
627 }
628
629 /* Release memory and unregister from sclp core. Controlled by init counting -
630 * only the last invoker will actually perform these actions. */
631 static void __init __sclp_vt220_cleanup(void)
632 {
633 sclp_vt220_init_count--;
634 if (sclp_vt220_init_count != 0)
635 return;
636 sclp_unregister(&sclp_vt220_register);
637 __sclp_vt220_free_pages();
638 tty_port_destroy(&sclp_vt220_port);
639 }
640
641 /* Allocate buffer pages and register with sclp core. Controlled by init
642 * counting - only the first invoker will actually perform these actions. */
643 static int __init __sclp_vt220_init(int num_pages)
644 {
645 void *page;
646 int i;
647 int rc;
648
649 sclp_vt220_init_count++;
650 if (sclp_vt220_init_count != 1)
651 return 0;
652 spin_lock_init(&sclp_vt220_lock);
653 INIT_LIST_HEAD(&sclp_vt220_empty);
654 INIT_LIST_HEAD(&sclp_vt220_outqueue);
655 init_timer(&sclp_vt220_timer);
656 tty_port_init(&sclp_vt220_port);
657 sclp_vt220_current_request = NULL;
658 sclp_vt220_buffered_chars = 0;
659 sclp_vt220_flush_later = 0;
660
661 /* Allocate pages for output buffering */
662 rc = -ENOMEM;
663 for (i = 0; i < num_pages; i++) {
664 page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
665 if (!page)
666 goto out;
667 list_add_tail(page, &sclp_vt220_empty);
668 }
669 rc = sclp_register(&sclp_vt220_register);
670 out:
671 if (rc) {
672 __sclp_vt220_free_pages();
673 sclp_vt220_init_count--;
674 tty_port_destroy(&sclp_vt220_port);
675 }
676 return rc;
677 }
678
679 static const struct tty_operations sclp_vt220_ops = {
680 .open = sclp_vt220_open,
681 .close = sclp_vt220_close,
682 .write = sclp_vt220_write,
683 .put_char = sclp_vt220_put_char,
684 .flush_chars = sclp_vt220_flush_chars,
685 .write_room = sclp_vt220_write_room,
686 .chars_in_buffer = sclp_vt220_chars_in_buffer,
687 .flush_buffer = sclp_vt220_flush_buffer,
688 };
689
690 /*
691 * Register driver with SCLP and Linux and initialize internal tty structures.
692 */
693 static int __init sclp_vt220_tty_init(void)
694 {
695 struct tty_driver *driver;
696 int rc;
697
698 /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
699 * symmetry between VM and LPAR systems regarding ttyS1. */
700 driver = alloc_tty_driver(1);
701 if (!driver)
702 return -ENOMEM;
703 rc = __sclp_vt220_init(MAX_KMEM_PAGES);
704 if (rc)
705 goto out_driver;
706
707 driver->driver_name = SCLP_VT220_DRIVER_NAME;
708 driver->name = SCLP_VT220_DEVICE_NAME;
709 driver->major = SCLP_VT220_MAJOR;
710 driver->minor_start = SCLP_VT220_MINOR;
711 driver->type = TTY_DRIVER_TYPE_SYSTEM;
712 driver->subtype = SYSTEM_TYPE_TTY;
713 driver->init_termios = tty_std_termios;
714 driver->flags = TTY_DRIVER_REAL_RAW;
715 tty_set_operations(driver, &sclp_vt220_ops);
716 tty_port_link_device(&sclp_vt220_port, driver, 0);
717
718 rc = tty_register_driver(driver);
719 if (rc)
720 goto out_init;
721 rc = sclp_register(&sclp_vt220_register_input);
722 if (rc)
723 goto out_reg;
724 sclp_vt220_driver = driver;
725 return 0;
726
727 out_reg:
728 tty_unregister_driver(driver);
729 out_init:
730 __sclp_vt220_cleanup();
731 out_driver:
732 put_tty_driver(driver);
733 return rc;
734 }
735 __initcall(sclp_vt220_tty_init);
736
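/*
 * Emit the current buffer and wait until all queued requests have been
 * processed by the SCLP. Used when suspending and from the panic/reboot
 * notifier so that no buffered output is lost.
 */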
737 static void __sclp_vt220_flush_buffer(void)
738 {
739 unsigned long flags;
740
741 sclp_vt220_emit_current();
742 spin_lock_irqsave(&sclp_vt220_lock, flags);
743 if (timer_pending(&sclp_vt220_timer))
744 del_timer(&sclp_vt220_timer);
745 while (sclp_vt220_queue_running) {
746 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
747 sclp_sync_wait();
748 spin_lock_irqsave(&sclp_vt220_lock, flags);
749 }
750 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
751 }
752
753 /*
754 * Resume console: If there are cached messages, emit them.
755 */
756 static void sclp_vt220_resume(void)
757 {
758 unsigned long flags;
759
760 spin_lock_irqsave(&sclp_vt220_lock, flags);
761 sclp_vt220_suspended = 0;
762 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
763 sclp_vt220_emit_current();
764 }
765
766 /*
767 * Suspend console: Set suspend flag and flush console
768 */
769 static void sclp_vt220_suspend(void)
770 {
771 unsigned long flags;
772
773 spin_lock_irqsave(&sclp_vt220_lock, flags);
774 sclp_vt220_suspended = 1;
775 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
776 __sclp_vt220_flush_buffer();
777 }
778
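/* Power management callback: flush and stop output on FREEZE, restart it on THAW/RESTORE. */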
779 static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
780 enum sclp_pm_event sclp_pm_event)
781 {
782 switch (sclp_pm_event) {
783 case SCLP_PM_EVENT_FREEZE:
784 sclp_vt220_suspend();
785 break;
786 case SCLP_PM_EVENT_RESTORE:
787 case SCLP_PM_EVENT_THAW:
788 sclp_vt220_resume();
789 break;
790 }
791 }
792
793 #ifdef CONFIG_SCLP_VT220_CONSOLE
794
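/*
 * Console write callback. Output is buffered and scheduled (do_schedule=1),
 * linefeeds are converted to CR/LF (convertlf=1), and may_fail is zero, so
 * the call waits for a free buffer (or drops the oldest queued one if
 * sclp_console_drop is set) instead of discarding console output.
 */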
795 static void
796 sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
797 {
798 __sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
799 }
800
801 static struct tty_driver *
802 sclp_vt220_con_device(struct console *c, int *index)
803 {
804 *index = 0;
805 return sclp_vt220_driver;
806 }
807
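/* Panic/reboot notifier: flush any buffered output before the system goes down. */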
808 static int
809 sclp_vt220_notify(struct notifier_block *self,
810 unsigned long event, void *data)
811 {
812 __sclp_vt220_flush_buffer();
813 return NOTIFY_OK;
814 }
815
816 static struct notifier_block on_panic_nb = {
817 .notifier_call = sclp_vt220_notify,
818 .priority = 1,
819 };
820
821 static struct notifier_block on_reboot_nb = {
822 .notifier_call = sclp_vt220_notify,
823 .priority = 1,
824 };
825
826 /* Structure needed to register with printk */
827 static struct console sclp_vt220_console =
828 {
829 .name = SCLP_VT220_CONSOLE_NAME,
830 .write = sclp_vt220_con_write,
831 .device = sclp_vt220_con_device,
832 .flags = CON_PRINTBUFFER,
833 .index = SCLP_VT220_CONSOLE_INDEX
834 };
835
836 static int __init
837 sclp_vt220_con_init(void)
838 {
839 int rc;
840
841 if (!CONSOLE_IS_SCLP)
842 return 0;
843 rc = __sclp_vt220_init(sclp_console_pages);
844 if (rc)
845 return rc;
846 /* Attach linux console */
847 atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
848 register_reboot_notifier(&on_reboot_nb);
849 register_console(&sclp_vt220_console);
850 return 0;
851 }
852
853 console_initcall(sclp_vt220_con_init);
854 #endif /* CONFIG_SCLP_VT220_CONSOLE */
855