[S390] vt220 console: convert from bootmem to slab
drivers/s390/char/sclp_vt220.c
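The commit title refers to how this driver obtains its output buffer pages: instead of reserving them from the boot-time (bootmem) allocator, they now come from the regular runtime page allocator. The snippet below is only an illustrative sketch of that kind of conversion. The "before" branch, including the slab_is_available() check and the alloc_bootmem_low_pages() call, is an assumption about the prior code and is not shown on this page; the "after" line matches __sclp_vt220_init() in the listing.

        /* Assumed "before" shape: choose the allocator depending on how far
         * boot has progressed. */
        if (slab_is_available())
                page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        else
                page = alloc_bootmem_low_pages(PAGE_SIZE);

        /* "After", as in __sclp_vt220_init() below: the page allocator is
         * always used, so the bootmem special case goes away. */
        page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
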
/*
 * SCLP VT220 terminal driver.
 *
 * Copyright IBM Corp. 2003, 2009
 *
 * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/major.h>
#include <linux/console.h>
#include <linux/kdev_t.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/reboot.h>

#include <asm/uaccess.h>
#include "sclp.h"

#define SCLP_VT220_MAJOR		TTY_MAJOR
#define SCLP_VT220_MINOR		65
#define SCLP_VT220_DRIVER_NAME		"sclp_vt220"
#define SCLP_VT220_DEVICE_NAME		"ttysclp"
#define SCLP_VT220_CONSOLE_NAME		"ttyS"
#define SCLP_VT220_CONSOLE_INDEX	1	/* console=ttyS1 */
#define SCLP_VT220_BUF_SIZE		80

/* Representation of a single write request */
struct sclp_vt220_request {
        struct list_head list;
        struct sclp_req sclp_req;
        int retry_count;
};

/* VT220 SCCB */
struct sclp_vt220_sccb {
        struct sccb_header header;
        struct evbuf_header evbuf;
};

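/*
 * Each output buffer is a single page: the SCCB (header plus event buffer)
 * sits at the start of the page and grows as message text is appended to it,
 * while the sclp_vt220_request bookkeeping structure is placed at the very
 * end of the page (see sclp_vt220_initialize_page()). The space left in
 * between is what may be used for message characters.
 */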
#define SCLP_VT220_MAX_CHARS_PER_BUFFER	(PAGE_SIZE - \
					 sizeof(struct sclp_vt220_request) - \
					 sizeof(struct sclp_vt220_sccb))

/* Structures and data needed to register tty driver */
static struct tty_driver *sclp_vt220_driver;

/* The tty_struct that the kernel associated with us */
static struct tty_struct *sclp_vt220_tty;

/* Lock to protect internal data from concurrent access */
static spinlock_t sclp_vt220_lock;

/* List of empty pages to be used as write request buffers */
static struct list_head sclp_vt220_empty;

/* List of pending requests */
static struct list_head sclp_vt220_outqueue;

/* Suspend mode flag */
static int sclp_vt220_suspended;

/* Flag that output queue is currently running */
static int sclp_vt220_queue_running;

/* Timer used for delaying write requests to merge subsequent messages into
 * a single buffer */
static struct timer_list sclp_vt220_timer;

/* Pointer to current request buffer which has been partially filled but not
 * yet sent */
static struct sclp_vt220_request *sclp_vt220_current_request;

/* Number of characters in current request buffer */
static int sclp_vt220_buffered_chars;

/* Counter controlling core driver initialization. */
static int __initdata sclp_vt220_init_count;

/* Flag indicating that sclp_vt220_current_request should really
 * have been already queued but wasn't because the SCLP was processing
 * another buffer */
static int sclp_vt220_flush_later;

static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
                                   enum sclp_pm_event sclp_pm_event);
static int __sclp_vt220_emit(struct sclp_vt220_request *request);
static void sclp_vt220_emit_current(void);

/* Registration structure for our interest in SCLP event buffers */
static struct sclp_register sclp_vt220_register = {
        .send_mask = EVTYP_VT220MSG_MASK,
        .receive_mask = EVTYP_VT220MSG_MASK,
        .state_change_fn = NULL,
        .receiver_fn = sclp_vt220_receiver_fn,
        .pm_event_fn = sclp_vt220_pm_event_fn,
};


/*
 * Put the provided request buffer back into the queue of empty buffers and
 * emit pending buffers if necessary.
 */
static void
sclp_vt220_process_queue(struct sclp_vt220_request *request)
{
        unsigned long flags;
        void *page;

        do {
                /* Put buffer back to list of empty buffers */
                page = request->sclp_req.sccb;
                spin_lock_irqsave(&sclp_vt220_lock, flags);
                /* Move request from outqueue to empty queue */
                list_del(&request->list);
                list_add_tail((struct list_head *) page, &sclp_vt220_empty);
                /* Check if there is a pending buffer on the out queue. */
                request = NULL;
                if (!list_empty(&sclp_vt220_outqueue))
                        request = list_entry(sclp_vt220_outqueue.next,
                                             struct sclp_vt220_request, list);
                if (!request || sclp_vt220_suspended) {
                        sclp_vt220_queue_running = 0;
                        spin_unlock_irqrestore(&sclp_vt220_lock, flags);
                        break;
                }
                spin_unlock_irqrestore(&sclp_vt220_lock, flags);
        } while (__sclp_vt220_emit(request));
        if (request == NULL && sclp_vt220_flush_later)
                sclp_vt220_emit_current();
        /* Check if the tty needs a wake up call */
        if (sclp_vt220_tty != NULL) {
                tty_wakeup(sclp_vt220_tty);
        }
}

#define SCLP_BUFFER_MAX_RETRY		1

/*
 * Callback through which the result of a write request is reported by the
 * SCLP.
 */
static void
sclp_vt220_callback(struct sclp_req *request, void *data)
{
        struct sclp_vt220_request *vt220_request;
        struct sclp_vt220_sccb *sccb;

        vt220_request = (struct sclp_vt220_request *) data;
        if (request->status == SCLP_REQ_FAILED) {
                sclp_vt220_process_queue(vt220_request);
                return;
        }
        sccb = (struct sclp_vt220_sccb *) vt220_request->sclp_req.sccb;

        /* Check SCLP response code and choose suitable action */
        switch (sccb->header.response_code) {
        case 0x0020: /* Normal completion */
                break;

        case 0x05f0: /* Target resource in improper state */
                break;

        case 0x0340: /* Contained SCLP equipment check */
                if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
                        break;
                /* Remove processed buffers and requeue rest */
                if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
                        /* Not all buffers were processed */
                        sccb->header.response_code = 0x0000;
                        vt220_request->sclp_req.status = SCLP_REQ_FILLED;
                        if (sclp_add_request(request) == 0)
                                return;
                }
                break;

        case 0x0040: /* SCLP equipment check */
                if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
                        break;
                sccb->header.response_code = 0x0000;
                vt220_request->sclp_req.status = SCLP_REQ_FILLED;
                if (sclp_add_request(request) == 0)
                        return;
                break;

        default:
                break;
        }
        sclp_vt220_process_queue(vt220_request);
}

/*
 * Emit vt220 request buffer to SCLP. Return zero on success, non-zero
 * otherwise.
 */
static int
__sclp_vt220_emit(struct sclp_vt220_request *request)
{
        if (!(sclp_vt220_register.sclp_receive_mask & EVTYP_VT220MSG_MASK)) {
                request->sclp_req.status = SCLP_REQ_FAILED;
                return -EIO;
        }
        request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
        request->sclp_req.status = SCLP_REQ_FILLED;
        request->sclp_req.callback = sclp_vt220_callback;
        request->sclp_req.callback_data = (void *) request;

        return sclp_add_request(&request->sclp_req);
}

/*
 * Queue and emit current request.
 */
static void
sclp_vt220_emit_current(void)
{
        unsigned long flags;
        struct sclp_vt220_request *request;
        struct sclp_vt220_sccb *sccb;

        spin_lock_irqsave(&sclp_vt220_lock, flags);
        if (sclp_vt220_current_request) {
                sccb = (struct sclp_vt220_sccb *)
                                sclp_vt220_current_request->sclp_req.sccb;
                /* Only emit buffers with content */
                if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
                        list_add_tail(&sclp_vt220_current_request->list,
                                      &sclp_vt220_outqueue);
                        sclp_vt220_current_request = NULL;
                        if (timer_pending(&sclp_vt220_timer))
                                del_timer(&sclp_vt220_timer);
                }
                sclp_vt220_flush_later = 0;
        }
        if (sclp_vt220_queue_running || sclp_vt220_suspended)
                goto out_unlock;
        if (list_empty(&sclp_vt220_outqueue))
                goto out_unlock;
        request = list_first_entry(&sclp_vt220_outqueue,
                                   struct sclp_vt220_request, list);
        sclp_vt220_queue_running = 1;
        spin_unlock_irqrestore(&sclp_vt220_lock, flags);

        if (__sclp_vt220_emit(request))
                sclp_vt220_process_queue(request);
        return;
out_unlock:
        spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}

#define SCLP_NORMAL_WRITE	0x00

/*
 * Helper function to initialize a page with the sclp request structure.
 */
static struct sclp_vt220_request *
sclp_vt220_initialize_page(void *page)
{
        struct sclp_vt220_request *request;
        struct sclp_vt220_sccb *sccb;

        /* Place request structure at end of page */
        request = ((struct sclp_vt220_request *)
                        ((addr_t) page + PAGE_SIZE)) - 1;
        request->retry_count = 0;
        request->sclp_req.sccb = page;
        /* SCCB goes at start of page */
        sccb = (struct sclp_vt220_sccb *) page;
        memset((void *) sccb, 0, sizeof(struct sclp_vt220_sccb));
        sccb->header.length = sizeof(struct sclp_vt220_sccb);
        sccb->header.function_code = SCLP_NORMAL_WRITE;
        sccb->header.response_code = 0x0000;
        sccb->evbuf.type = EVTYP_VT220MSG;
        sccb->evbuf.length = sizeof(struct evbuf_header);

        return request;
}

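/* Return the number of message characters that still fit into the page
 * backing this request. */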
static inline unsigned int
sclp_vt220_space_left(struct sclp_vt220_request *request)
{
        struct sclp_vt220_sccb *sccb;
        sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
        return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
               sccb->header.length;
}

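/* Return the number of message characters currently stored in this
 * request's event buffer. */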
static inline unsigned int
sclp_vt220_chars_stored(struct sclp_vt220_request *request)
{
        struct sclp_vt220_sccb *sccb;
        sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
        return sccb->evbuf.length - sizeof(struct evbuf_header);
}

/*
 * Add msg to buffer associated with request. Return the number of characters
 * added.
 */
static int
sclp_vt220_add_msg(struct sclp_vt220_request *request,
                   const unsigned char *msg, int count, int convertlf)
{
        struct sclp_vt220_sccb *sccb;
        void *buffer;
        unsigned char c;
        int from;
        int to;

        if (count > sclp_vt220_space_left(request))
                count = sclp_vt220_space_left(request);
        if (count <= 0)
                return 0;

        sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
        buffer = (void *) ((addr_t) sccb + sccb->header.length);

        if (convertlf) {
                /* Perform linefeed conversion (0x0a -> 0x0a 0x0d) */
                for (from = 0, to = 0;
                     (from < count) && (to < sclp_vt220_space_left(request));
                     from++) {
                        /* Retrieve character */
                        c = msg[from];
                        /* Perform conversion */
                        if (c == 0x0a) {
                                if (to + 1 < sclp_vt220_space_left(request)) {
                                        ((unsigned char *) buffer)[to++] = c;
                                        ((unsigned char *) buffer)[to++] = 0x0d;
                                } else
                                        break;

                        } else
                                ((unsigned char *) buffer)[to++] = c;
                }
                sccb->header.length += to;
                sccb->evbuf.length += to;
                return from;
        } else {
                memcpy(buffer, (const void *) msg, count);
                sccb->header.length += count;
                sccb->evbuf.length += count;
                return count;
        }
}

/*
 * Emit buffer after having waited long enough for more data to arrive.
 */
static void
sclp_vt220_timeout(unsigned long data)
{
        sclp_vt220_emit_current();
}

#define BUFFER_MAX_DELAY	HZ/20

/*
 * Internal implementation of the write function. Write COUNT bytes of data
 * from memory at BUF to the SCLP interface. If the data does not fit into
 * the current write buffer, emit the current buffer and allocate a new one.
 * If there are no more empty buffers available, wait until one gets emptied.
 * If DO_SCHEDULE is non-zero, the buffer will be scheduled for emitting after
 * a timeout - otherwise the caller has to explicitly call the flush function.
 * A non-zero CONVERTLF parameter indicates that 0x0a characters in the message
 * buffer should be converted to 0x0a 0x0d. After completion, return the number
 * of bytes written.
 */
static int
__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
                   int convertlf, int may_fail)
{
        unsigned long flags;
        void *page;
        int written;
        int overall_written;

        if (count <= 0)
                return 0;
        overall_written = 0;
        spin_lock_irqsave(&sclp_vt220_lock, flags);
        do {
                /* Create an sclp output buffer if none exists yet */
                if (sclp_vt220_current_request == NULL) {
                        while (list_empty(&sclp_vt220_empty)) {
                                spin_unlock_irqrestore(&sclp_vt220_lock, flags);
                                if (may_fail || sclp_vt220_suspended)
                                        goto out;
                                else
                                        sclp_sync_wait();
                                spin_lock_irqsave(&sclp_vt220_lock, flags);
                        }
                        page = (void *) sclp_vt220_empty.next;
                        list_del((struct list_head *) page);
                        sclp_vt220_current_request =
                                sclp_vt220_initialize_page(page);
                }
                /* Try to write the string to the current request buffer */
                written = sclp_vt220_add_msg(sclp_vt220_current_request,
                                             buf, count, convertlf);
                overall_written += written;
                if (written == count)
                        break;
                /*
                 * Not all characters could be written to the current
                 * output buffer. Emit the buffer, create a new buffer
                 * and then output the rest of the string.
                 */
                spin_unlock_irqrestore(&sclp_vt220_lock, flags);
                sclp_vt220_emit_current();
                spin_lock_irqsave(&sclp_vt220_lock, flags);
                buf += written;
                count -= written;
        } while (count > 0);
        /* Setup timer to output current console buffer after some time */
        if (sclp_vt220_current_request != NULL &&
            !timer_pending(&sclp_vt220_timer) && do_schedule) {
                sclp_vt220_timer.function = sclp_vt220_timeout;
                sclp_vt220_timer.data = 0UL;
                sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
                add_timer(&sclp_vt220_timer);
        }
        spin_unlock_irqrestore(&sclp_vt220_lock, flags);
out:
        return overall_written;
}

/*
 * This routine is called by the kernel to write a series of
 * characters to the tty device. The characters may come from
 * user space or kernel space. This routine will return the
 * number of characters actually accepted for writing.
 */
static int
sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
        return __sclp_vt220_write(buf, count, 1, 0, 1);
}

#define SCLP_VT220_SESSION_ENDED	0x01
#define SCLP_VT220_SESSION_STARTED	0x80
#define SCLP_VT220_SESSION_DATA		0x00

/*
 * Called by the SCLP to report incoming event buffers.
 */
static void
sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
{
        char *buffer;
        unsigned int count;

        /* Ignore input if device is not open */
        if (sclp_vt220_tty == NULL)
                return;

        buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
        count = evbuf->length - sizeof(struct evbuf_header);

        switch (*buffer) {
        case SCLP_VT220_SESSION_ENDED:
        case SCLP_VT220_SESSION_STARTED:
                break;
        case SCLP_VT220_SESSION_DATA:
                /* Send input to line discipline */
                buffer++;
                count--;
                tty_insert_flip_string(sclp_vt220_tty, buffer, count);
                tty_flip_buffer_push(sclp_vt220_tty);
                break;
        }
}

/*
 * This routine is called when a particular tty device is opened.
 */
static int
sclp_vt220_open(struct tty_struct *tty, struct file *filp)
{
        if (tty->count == 1) {
                sclp_vt220_tty = tty;
                tty->driver_data = kmalloc(SCLP_VT220_BUF_SIZE, GFP_KERNEL);
                if (tty->driver_data == NULL)
                        return -ENOMEM;
                tty->low_latency = 0;
        }
        return 0;
}

/*
 * This routine is called when a particular tty device is closed.
 */
static void
sclp_vt220_close(struct tty_struct *tty, struct file *filp)
{
        if (tty->count == 1) {
                sclp_vt220_tty = NULL;
                kfree(tty->driver_data);
                tty->driver_data = NULL;
        }
}

/*
 * This routine is called by the kernel to write a single
 * character to the tty device. If the kernel uses this routine,
 * it must call the flush_chars() routine (if defined) when it is
 * done stuffing characters into the driver.
 */
static int
sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
{
        return __sclp_vt220_write(&ch, 1, 0, 0, 1);
}

/*
 * This routine is called by the kernel after it has written a
 * series of characters to the tty device using put_char().
 */
static void
sclp_vt220_flush_chars(struct tty_struct *tty)
{
        if (!sclp_vt220_queue_running)
                sclp_vt220_emit_current();
        else
                sclp_vt220_flush_later = 1;
}

/*
 * This routine returns the number of characters the tty driver
 * will accept for queuing to be written. This number is subject
 * to change as output buffers get emptied, or if output flow
 * control is in effect.
 */
static int
sclp_vt220_write_room(struct tty_struct *tty)
{
        unsigned long flags;
        struct list_head *l;
        int count;

        spin_lock_irqsave(&sclp_vt220_lock, flags);
        count = 0;
        if (sclp_vt220_current_request != NULL)
                count = sclp_vt220_space_left(sclp_vt220_current_request);
        list_for_each(l, &sclp_vt220_empty)
                count += SCLP_VT220_MAX_CHARS_PER_BUFFER;
        spin_unlock_irqrestore(&sclp_vt220_lock, flags);
        return count;
}

/*
 * Return number of buffered chars.
 */
static int
sclp_vt220_chars_in_buffer(struct tty_struct *tty)
{
        unsigned long flags;
        struct list_head *l;
        struct sclp_vt220_request *r;
        int count;

        spin_lock_irqsave(&sclp_vt220_lock, flags);
        count = 0;
        if (sclp_vt220_current_request != NULL)
                count = sclp_vt220_chars_stored(sclp_vt220_current_request);
        list_for_each(l, &sclp_vt220_outqueue) {
                r = list_entry(l, struct sclp_vt220_request, list);
                count += sclp_vt220_chars_stored(r);
        }
        spin_unlock_irqrestore(&sclp_vt220_lock, flags);
        return count;
}

/*
 * Pass on all buffers to the hardware. Return only when there are no more
 * buffers pending.
 */
static void
sclp_vt220_flush_buffer(struct tty_struct *tty)
{
        sclp_vt220_emit_current();
}

/* Release allocated pages. */
static void __init __sclp_vt220_free_pages(void)
{
        struct list_head *page, *p;

        list_for_each_safe(page, p, &sclp_vt220_empty) {
                list_del(page);
                free_page((unsigned long) page);
        }
}

/* Release memory and unregister from sclp core. Controlled by init counting -
 * only the last invoker will actually perform these actions. */
static void __init __sclp_vt220_cleanup(void)
{
        sclp_vt220_init_count--;
        if (sclp_vt220_init_count != 0)
                return;
        sclp_unregister(&sclp_vt220_register);
        __sclp_vt220_free_pages();
}

/* Allocate buffer pages and register with sclp core. Controlled by init
 * counting - only the first invoker will actually perform these actions. */
static int __init __sclp_vt220_init(int num_pages)
{
        void *page;
        int i;
        int rc;

        sclp_vt220_init_count++;
        if (sclp_vt220_init_count != 1)
                return 0;
        spin_lock_init(&sclp_vt220_lock);
        INIT_LIST_HEAD(&sclp_vt220_empty);
        INIT_LIST_HEAD(&sclp_vt220_outqueue);
        init_timer(&sclp_vt220_timer);
        sclp_vt220_current_request = NULL;
        sclp_vt220_buffered_chars = 0;
        sclp_vt220_tty = NULL;
        sclp_vt220_flush_later = 0;

        /* Allocate pages for output buffering */
        rc = -ENOMEM;
        for (i = 0; i < num_pages; i++) {
                page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
                if (!page)
                        goto out;
                list_add_tail(page, &sclp_vt220_empty);
        }
        rc = sclp_register(&sclp_vt220_register);
out:
        if (rc) {
                __sclp_vt220_free_pages();
                sclp_vt220_init_count--;
        }
        return rc;
}

static const struct tty_operations sclp_vt220_ops = {
        .open = sclp_vt220_open,
        .close = sclp_vt220_close,
        .write = sclp_vt220_write,
        .put_char = sclp_vt220_put_char,
        .flush_chars = sclp_vt220_flush_chars,
        .write_room = sclp_vt220_write_room,
        .chars_in_buffer = sclp_vt220_chars_in_buffer,
        .flush_buffer = sclp_vt220_flush_buffer,
};

/*
 * Register driver with SCLP and Linux and initialize internal tty structures.
 */
static int __init sclp_vt220_tty_init(void)
{
        struct tty_driver *driver;
        int rc;

        /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
         * symmetry between VM and LPAR systems regarding ttyS1. */
        driver = alloc_tty_driver(1);
        if (!driver)
                return -ENOMEM;
        rc = __sclp_vt220_init(MAX_KMEM_PAGES);
        if (rc)
                goto out_driver;

        driver->owner = THIS_MODULE;
        driver->driver_name = SCLP_VT220_DRIVER_NAME;
        driver->name = SCLP_VT220_DEVICE_NAME;
        driver->major = SCLP_VT220_MAJOR;
        driver->minor_start = SCLP_VT220_MINOR;
        driver->type = TTY_DRIVER_TYPE_SYSTEM;
        driver->subtype = SYSTEM_TYPE_TTY;
        driver->init_termios = tty_std_termios;
        driver->flags = TTY_DRIVER_REAL_RAW;
        tty_set_operations(driver, &sclp_vt220_ops);

        rc = tty_register_driver(driver);
        if (rc)
                goto out_init;
        sclp_vt220_driver = driver;
        return 0;

out_init:
        __sclp_vt220_cleanup();
out_driver:
        put_tty_driver(driver);
        return rc;
}
__initcall(sclp_vt220_tty_init);

#ifdef CONFIG_SCLP_VT220_CONSOLE

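/* Console write callback: buffer the text, appending a carriage return
 * after each line feed, and schedule the buffer for emission after a
 * short delay. */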
static void
sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
{
        __sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
}

static struct tty_driver *
sclp_vt220_con_device(struct console *c, int *index)
{
        *index = 0;
        return sclp_vt220_driver;
}

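/*
 * Emit any partially filled buffer and busy-wait (via sclp_sync_wait) until
 * the output queue has drained. Called on suspend and from the panic and
 * reboot notifiers.
 */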
static void __sclp_vt220_flush_buffer(void)
{
        unsigned long flags;

        sclp_vt220_emit_current();
        spin_lock_irqsave(&sclp_vt220_lock, flags);
        if (timer_pending(&sclp_vt220_timer))
                del_timer(&sclp_vt220_timer);
        while (sclp_vt220_queue_running) {
                spin_unlock_irqrestore(&sclp_vt220_lock, flags);
                sclp_sync_wait();
                spin_lock_irqsave(&sclp_vt220_lock, flags);
        }
        spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}

/*
 * Resume console: If there are cached messages, emit them.
 */
static void sclp_vt220_resume(void)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_vt220_lock, flags);
        sclp_vt220_suspended = 0;
        spin_unlock_irqrestore(&sclp_vt220_lock, flags);
        sclp_vt220_emit_current();
}

/*
 * Suspend console: Set suspend flag and flush console.
 */
static void sclp_vt220_suspend(void)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_vt220_lock, flags);
        sclp_vt220_suspended = 1;
        spin_unlock_irqrestore(&sclp_vt220_lock, flags);
        __sclp_vt220_flush_buffer();
}

static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
                                   enum sclp_pm_event sclp_pm_event)
{
        switch (sclp_pm_event) {
        case SCLP_PM_EVENT_FREEZE:
                sclp_vt220_suspend();
                break;
        case SCLP_PM_EVENT_RESTORE:
        case SCLP_PM_EVENT_THAW:
                sclp_vt220_resume();
                break;
        }
}

static int
sclp_vt220_notify(struct notifier_block *self,
                  unsigned long event, void *data)
{
        __sclp_vt220_flush_buffer();
        return NOTIFY_OK;
}

static struct notifier_block on_panic_nb = {
        .notifier_call = sclp_vt220_notify,
        .priority = 1,
};

static struct notifier_block on_reboot_nb = {
        .notifier_call = sclp_vt220_notify,
        .priority = 1,
};

/* Structure needed to register with printk */
static struct console sclp_vt220_console =
{
        .name = SCLP_VT220_CONSOLE_NAME,
        .write = sclp_vt220_con_write,
        .device = sclp_vt220_con_device,
        .flags = CON_PRINTBUFFER,
        .index = SCLP_VT220_CONSOLE_INDEX
};

static int __init
sclp_vt220_con_init(void)
{
        int rc;

        if (!CONSOLE_IS_SCLP)
                return 0;
        rc = __sclp_vt220_init(MAX_CONSOLE_PAGES);
        if (rc)
                return rc;
        /* Attach linux console */
        atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
        register_reboot_notifier(&on_reboot_nb);
        register_console(&sclp_vt220_console);
        return 0;
}

console_initcall(sclp_vt220_con_init);
#endif /* CONFIG_SCLP_VT220_CONSOLE */