1 /*
2 * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver
3 *
4 * This HVC device driver provides terminal access using
5 * z/VM IUCV communication paths.
6 *
7 * Copyright IBM Corp. 2008
8 *
9 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
10 */
11 #define KMSG_COMPONENT "hvc_iucv"
12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
14 #include <linux/types.h>
15 #include <asm/ebcdic.h>
16 #include <linux/ctype.h>
17 #include <linux/delay.h>
18 #include <linux/init.h>
19 #include <linux/mempool.h>
20 #include <linux/moduleparam.h>
21 #include <linux/tty.h>
22 #include <linux/wait.h>
23 #include <net/iucv/iucv.h>
24
25 #include "hvc_console.h"
26
27
28 /* General device driver settings */
29 #define HVC_IUCV_MAGIC 0xc9e4c3e5
30 #define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS
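/* Keep a minimum number of iucv_tty_buffer elements preallocated in the
 * mempool so that buffers can still be obtained with GFP_ATOMIC from IUCV
 * callback (tasklet) context even when regular slab allocations fail. */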
31 #define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
32
33 /* IUCV TTY message */
34 #define MSG_VERSION 0x02 /* Message version */
35 #define MSG_TYPE_ERROR 0x01 /* Error message */
36 #define MSG_TYPE_TERMENV 0x02 /* Terminal environment variable */
37 #define MSG_TYPE_TERMIOS 0x04 /* Terminal IO struct update */
38 #define MSG_TYPE_WINSIZE 0x08 /* Terminal window size update */
39 #define MSG_TYPE_DATA 0x10 /* Terminal data */
40
41 struct iucv_tty_msg {
42 u8 version; /* Message version */
43 u8 type; /* Message type */
44 #define MSG_MAX_DATALEN ((u16)(~0))
45 u16 datalen; /* Payload length */
46 u8 data[]; /* Payload buffer */
47 } __attribute__((packed));
48 #define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data))
49
50 enum iucv_state_t {
51 IUCV_DISCONN = 0,
52 IUCV_CONNECTED = 1,
53 IUCV_SEVERED = 2,
54 };
55
56 enum tty_state_t {
57 TTY_CLOSED = 0,
58 TTY_OPENED = 1,
59 };
60
61 struct hvc_iucv_private {
62 struct hvc_struct *hvc; /* HVC struct reference */
63 u8 srv_name[8]; /* IUCV service name (ebcdic) */
64 unsigned char is_console; /* Linux console usage flag */
65 enum iucv_state_t iucv_state; /* IUCV connection status */
66 enum tty_state_t tty_state; /* TTY status */
67 struct iucv_path *path; /* IUCV path pointer */
68 spinlock_t lock; /* hvc_iucv_private lock */
69 #define SNDBUF_SIZE (PAGE_SIZE) /* must be < MSG_MAX_DATALEN */
70 void *sndbuf; /* send buffer */
71 size_t sndbuf_len; /* length of send buffer */
72 #define QUEUE_SNDBUF_DELAY (HZ / 25)
73 struct delayed_work sndbuf_work; /* work: send iucv msg(s) */
74 wait_queue_head_t sndbuf_waitq; /* wait for send completion */
75 struct list_head tty_outqueue; /* outgoing IUCV messages */
76 struct list_head tty_inqueue; /* incoming IUCV messages */
77 };
78
79 struct iucv_tty_buffer {
80 struct list_head list; /* list pointer */
81 struct iucv_message msg; /* store an IUCV message */
82 size_t offset; /* data buffer offset */
83 struct iucv_tty_msg *mbuf; /* buffer to store input/output data */
84 };
85
86 /* IUCV callback handler */
87 static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
88 static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
89 static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
90 static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
91
92
93 /* Kernel module parameter: use one terminal device by default */
94 static unsigned long hvc_iucv_devices = 1;
95
96 /* Array of allocated hvc iucv tty lines... */
97 static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
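/* Index into hvc_iucv_table of the terminal device that is registered as the
 * Linux console */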
98 #define IUCV_HVC_CON_IDX (0)
99 /* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
100 #define MAX_VMID_FILTER (500)
101 static size_t hvc_iucv_filter_size;
102 static void *hvc_iucv_filter;
103 static const char *hvc_iucv_filter_string;
104 static DEFINE_RWLOCK(hvc_iucv_filter_lock);
105
106 /* Kmem cache and mempool for iucv_tty_buffer elements */
107 static struct kmem_cache *hvc_iucv_buffer_cache;
108 static mempool_t *hvc_iucv_mempool;
109
110 /* IUCV handler callback functions */
111 static struct iucv_handler hvc_iucv_handler = {
112 .path_pending = hvc_iucv_path_pending,
113 .path_severed = hvc_iucv_path_severed,
114 .message_complete = hvc_iucv_msg_complete,
115 .message_pending = hvc_iucv_msg_pending,
116 };
117
118
119 /**
120 * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
121 * @num: The HVC virtual terminal number (vtermno)
122 *
123 * This function returns the struct hvc_iucv_private instance that corresponds
124 * to the HVC virtual terminal number specified as parameter @num.
125 */
126 struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
127 {
128 if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
129 return NULL;
130 return hvc_iucv_table[num - HVC_IUCV_MAGIC];
131 }
132
133 /**
134 * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
135 * @size: Size of the internal buffer used to store data.
136 * @flags: Memory allocation flags passed to mempool.
137 *
138 * This function allocates a new struct iucv_tty_buffer element and, optionally,
139 * allocates an internal data buffer with the specified size @size.
140 * Note: The total message size is the size of the internal data buffer plus
141 * the size of the iucv_tty_msg header.
142 * The function returns NULL if memory allocation has failed.
143 */
144 static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
145 {
146 struct iucv_tty_buffer *bufp;
147
148 bufp = mempool_alloc(hvc_iucv_mempool, flags);
149 if (!bufp)
150 return NULL;
151 memset(bufp, 0, sizeof(*bufp));
152
153 if (size > 0) {
154 bufp->msg.length = MSG_SIZE(size);
155 bufp->mbuf = kmalloc(bufp->msg.length, flags);
156 if (!bufp->mbuf) {
157 mempool_free(bufp, hvc_iucv_mempool);
158 return NULL;
159 }
160 bufp->mbuf->version = MSG_VERSION;
161 bufp->mbuf->type = MSG_TYPE_DATA;
162 bufp->mbuf->datalen = (u16) size;
163 }
164 return bufp;
165 }
166
167 /**
168 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
169 * @bufp: Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
170 */
171 static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
172 {
173 kfree(bufp->mbuf);
174 mempool_free(bufp, hvc_iucv_mempool);
175 }
176
177 /**
178 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
179 * @list: List containing struct iucv_tty_buffer elements.
180 */
181 static void destroy_tty_buffer_list(struct list_head *list)
182 {
183 struct iucv_tty_buffer *ent, *next;
184
185 list_for_each_entry_safe(ent, next, list, list) {
186 list_del(&ent->list);
187 destroy_tty_buffer(ent);
188 }
189 }
190
191 /**
192 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
193 * @priv: Pointer to struct hvc_iucv_private
194 * @buf: HVC buffer for writing received terminal data.
195 * @count: HVC buffer size.
196 * @has_more_data: Pointer to an int variable.
197 *
198 * The function picks up pending messages from the input queue and receives
199 * the message data that is then written to the specified buffer @buf.
200 * If the buffer size @count is less than the data message size, the
201 * message is kept on the input queue and @has_more_data is set to 1.
202 * If all message data has been written, the message is removed from
203 * the input queue.
204 *
205 * The function returns the number of bytes written to the terminal, zero if
206 * there are no pending data messages available or if there is no established
207 * IUCV path.
208 * If the IUCV path has been severed, then -EPIPE is returned to cause a
209 * hang up (that is issued by the HVC layer).
210 */
211 static int hvc_iucv_write(struct hvc_iucv_private *priv,
212 char *buf, int count, int *has_more_data)
213 {
214 struct iucv_tty_buffer *rb;
215 int written;
216 int rc;
217
218 /* immediately return if there is no IUCV connection */
219 if (priv->iucv_state == IUCV_DISCONN)
220 return 0;
221
222 /* if the IUCV path has been severed, return -EPIPE to inform the
223 * HVC layer to hang up the tty device. */
224 if (priv->iucv_state == IUCV_SEVERED)
225 return -EPIPE;
226
227 /* check if there are pending messages */
228 if (list_empty(&priv->tty_inqueue))
229 return 0;
230
231 /* receive an iucv message and flip data to the tty (ldisc) */
232 rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);
233
234 written = 0;
235 if (!rb->mbuf) { /* message not yet received ... */
236 /* allocate mem to store msg data; if no memory is available
237 * then leave the buffer on the list and re-try later */
238 rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC);
239 if (!rb->mbuf)
240 return -ENOMEM;
241
242 rc = __iucv_message_receive(priv->path, &rb->msg, 0,
243 rb->mbuf, rb->msg.length, NULL);
244 switch (rc) {
245 case 0: /* Successful */
246 break;
247 case 2: /* No message found */
248 case 9: /* Message purged */
249 break;
250 default:
251 written = -EIO;
252 }
253 /* remove buffer if an error has occurred or the received data
254 * is not valid */
255 if (rc || (rb->mbuf->version != MSG_VERSION) ||
256 (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
257 goto out_remove_buffer;
258 }
259
260 switch (rb->mbuf->type) {
261 case MSG_TYPE_DATA:
262 written = min_t(int, rb->mbuf->datalen - rb->offset, count);
263 memcpy(buf, rb->mbuf->data + rb->offset, written);
264 if (written < (rb->mbuf->datalen - rb->offset)) {
265 rb->offset += written;
266 *has_more_data = 1;
267 goto out_written;
268 }
269 break;
270
271 case MSG_TYPE_WINSIZE:
272 if (rb->mbuf->datalen != sizeof(struct winsize))
273 break;
274 hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
275 break;
276
277 case MSG_TYPE_ERROR: /* ignored ... */
278 case MSG_TYPE_TERMENV: /* ignored ... */
279 case MSG_TYPE_TERMIOS: /* ignored ... */
280 break;
281 }
282
283 out_remove_buffer:
284 list_del(&rb->list);
285 destroy_tty_buffer(rb);
286 *has_more_data = !list_empty(&priv->tty_inqueue);
287
288 out_written:
289 return written;
290 }
291
292 /**
293 * hvc_iucv_get_chars() - HVC get_chars operation.
294 * @vtermno: HVC virtual terminal number.
295 * @buf: Pointer to a buffer to store data
296 * @count: Size of buffer available for writing
297 *
298 * The HVC thread calls this method to read characters from the back-end.
299 * If an IUCV communication path has been established, pending IUCV messages
300 * are received and data is copied into buffer @buf up to @count bytes.
301 *
302 * Locking: The routine gets called under an irqsave() spinlock and
303 * takes the struct hvc_iucv_private->lock before calling
304 * helper functions.
305 */
306 static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
307 {
308 struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
309 int written;
310 int has_more_data;
311
312 if (count <= 0)
313 return 0;
314
315 if (!priv)
316 return -ENODEV;
317
318 spin_lock(&priv->lock);
319 has_more_data = 0;
320 written = hvc_iucv_write(priv, buf, count, &has_more_data);
321 spin_unlock(&priv->lock);
322
323 /* if there are still messages on the queue... schedule another run */
324 if (has_more_data)
325 hvc_kick();
326
327 return written;
328 }
329
330 /**
331 * hvc_iucv_queue() - Buffer terminal data for sending.
332 * @priv: Pointer to struct hvc_iucv_private instance.
333 * @buf: Buffer containing data to send.
334 * @count: Size of buffer and amount of data to send.
335 *
336 * The function queues data for sending. To actually send the buffered data,
337 * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
338 * The function returns the number of data bytes that have been buffered.
339 *
340 * If the device is not connected, data is ignored and the function returns
341 * @count.
342 * If the buffer is full, the function returns 0.
343 * If an existing IUCV communication path has been severed, -EPIPE is returned
344 * (that can be passed to HVC layer to cause a tty hangup).
345 */
346 static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
347 int count)
348 {
349 size_t len;
350
351 if (priv->iucv_state == IUCV_DISCONN)
352 return count; /* ignore data */
353
354 if (priv->iucv_state == IUCV_SEVERED)
355 return -EPIPE;
356
357 len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
358 if (!len)
359 return 0;
360
361 memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
362 priv->sndbuf_len += len;
363
364 if (priv->iucv_state == IUCV_CONNECTED)
365 schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);
366
367 return len;
368 }
369
370 /**
371 * hvc_iucv_send() - Send an IUCV message containing terminal data.
372 * @priv: Pointer to struct hvc_iucv_private instance.
373 *
374 * If an IUCV communication path has been established, the buffered output data
375 * is sent via an IUCV message and the number of bytes sent is returned.
376 * Returns -EIO if no IUCV communication path is established, zero if there
377 * is no buffered data to send, or -EPIPE if the path has been severed.
378 */
379 static int hvc_iucv_send(struct hvc_iucv_private *priv)
380 {
381 struct iucv_tty_buffer *sb;
382 int rc, len;
383
384 if (priv->iucv_state == IUCV_SEVERED)
385 return -EPIPE;
386
387 if (priv->iucv_state == IUCV_DISCONN)
388 return -EIO;
389
390 if (!priv->sndbuf_len)
391 return 0;
392
393 /* allocate internal buffer to store msg data and also compute total
394 * message length */
395 sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
396 if (!sb)
397 return -ENOMEM;
398
399 memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
400 sb->mbuf->datalen = (u16) priv->sndbuf_len;
401 sb->msg.length = MSG_SIZE(sb->mbuf->datalen);
402
403 list_add_tail(&sb->list, &priv->tty_outqueue);
404
405 rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
406 (void *) sb->mbuf, sb->msg.length);
407 if (rc) {
408 /* drop the message here; however we might want to handle
409 * 0x03 (msg limit reached) by trying again... */
410 list_del(&sb->list);
411 destroy_tty_buffer(sb);
412 }
413 len = priv->sndbuf_len;
414 priv->sndbuf_len = 0;
415
416 return len;
417 }
418
419 /**
420 * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
421 * @work: Work structure.
422 *
423 * This work queue function sends buffered output data over IUCV and,
424 * if not all buffered data could be sent, reschedules itself.
425 */
426 static void hvc_iucv_sndbuf_work(struct work_struct *work)
427 {
428 struct hvc_iucv_private *priv;
429
430 priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
431 if (!priv)
432 return;
433
434 spin_lock_bh(&priv->lock);
435 hvc_iucv_send(priv);
436 spin_unlock_bh(&priv->lock);
437 }
438
439 /**
440 * hvc_iucv_put_chars() - HVC put_chars operation.
441 * @vtermno: HVC virtual terminal number.
442 * @buf: Pointer to a buffer to read data from
443 * @count: Size of buffer available for reading
444 *
445 * The HVC thread calls this method to write characters to the back-end.
446 * The function calls hvc_iucv_queue() to queue terminal data for sending.
447 *
448 * Locking: The method gets called under an irqsave() spinlock and
449 * locks struct hvc_iucv_private->lock.
450 */
451 static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
452 {
453 struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
454 int queued;
455
456 if (count <= 0)
457 return 0;
458
459 if (!priv)
460 return -ENODEV;
461
462 spin_lock(&priv->lock);
463 queued = hvc_iucv_queue(priv, buf, count);
464 spin_unlock(&priv->lock);
465
466 return queued;
467 }
468
469 /**
470 * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
471 * @hp: Pointer to the HVC device (struct hvc_struct)
472 * @id: Additional data (originally passed to hvc_alloc): the index of a struct
473 * hvc_iucv_private instance.
474 *
475 * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
476 * instance that is derived from @id. Always returns 0.
477 *
478 * Locking: struct hvc_iucv_private->lock, spin_lock_bh
479 */
480 static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
481 {
482 struct hvc_iucv_private *priv;
483
484 priv = hvc_iucv_get_private(id);
485 if (!priv)
486 return 0;
487
488 spin_lock_bh(&priv->lock);
489 priv->tty_state = TTY_OPENED;
490 spin_unlock_bh(&priv->lock);
491
492 return 0;
493 }
494
495 /**
496 * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
497 * @priv: Pointer to the struct hvc_iucv_private instance.
498 */
499 static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
500 {
501 destroy_tty_buffer_list(&priv->tty_outqueue);
502 destroy_tty_buffer_list(&priv->tty_inqueue);
503
504 priv->tty_state = TTY_CLOSED;
505 priv->iucv_state = IUCV_DISCONN;
506
507 priv->sndbuf_len = 0;
508 }
509
510 /**
511 * tty_outqueue_empty() - Test if the tty outq is empty
512 * @priv: Pointer to struct hvc_iucv_private instance.
513 */
514 static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
515 {
516 int rc;
517
518 spin_lock_bh(&priv->lock);
519 rc = list_empty(&priv->tty_outqueue);
520 spin_unlock_bh(&priv->lock);
521
522 return rc;
523 }
524
525 /**
526 * flush_sndbuf_sync() - Flush send buffer and wait for completion
527 * @priv: Pointer to struct hvc_iucv_private instance.
528 *
529 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
530 * to flush any buffered terminal output data and waits for completion.
531 */
532 static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
533 {
534 int sync_wait;
535
536 cancel_delayed_work_sync(&priv->sndbuf_work);
537
538 spin_lock_bh(&priv->lock);
539 hvc_iucv_send(priv); /* force sending buffered data */
540 sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
541 spin_unlock_bh(&priv->lock);
542
543 if (sync_wait)
544 wait_event_timeout(priv->sndbuf_waitq,
545 tty_outqueue_empty(priv), HZ);
546 }
547
548 /**
549 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
550 * @hp: Pointer to the HVC device (struct hvc_struct)
551 * @id: Additional data (originally passed to hvc_alloc):
552 * the index of a struct hvc_iucv_private instance.
553 *
554 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
555 * virtual or otherwise) has occurred.
556 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
557 * to keep an existing IUCV communication path established.
558 * (Background: vhangup() is called from user space (by getty or login) to
559 * disable writing to the tty by other applications).
560 * If the tty has been opened and an established IUCV path has been severed
561 * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
562 *
563 * Locking: struct hvc_iucv_private->lock
564 */
565 static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
566 {
567 struct hvc_iucv_private *priv;
568
569 priv = hvc_iucv_get_private(id);
570 if (!priv)
571 return;
572
573 flush_sndbuf_sync(priv);
574
575 spin_lock_bh(&priv->lock);
576 /* NOTE: If the hangup was scheduled by ourselves (from the iucv
577 * path_severed callback [IUCV_SEVERED]), we have to clean up
578 * our structure and to set state to TTY_CLOSED.
579 * If the tty was hung up otherwise (e.g. vhangup()), then we
580 * ignore this hangup and keep an established IUCV path open...
581 * (...the reason is that we are not able to connect back to the
582 * client if we disconnect on hang up) */
583 priv->tty_state = TTY_CLOSED;
584
585 if (priv->iucv_state == IUCV_SEVERED)
586 hvc_iucv_cleanup(priv);
587 spin_unlock_bh(&priv->lock);
588 }
589
590 /**
591 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
592 * @hp: Pointer to the HVC device (struct hvc_struct)
593 * @id: Additional data (originally passed to hvc_alloc):
594 * the index of a struct hvc_iucv_private instance.
595 *
596 * This routine notifies the HVC back-end that the last tty device fd has been
597 * closed. The function calls hvc_iucv_cleanup() to clean up the struct
598 * hvc_iucv_private instance.
599 *
600 * Locking: struct hvc_iucv_private->lock
601 */
602 static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
603 {
604 struct hvc_iucv_private *priv;
605 struct iucv_path *path;
606
607 priv = hvc_iucv_get_private(id);
608 if (!priv)
609 return;
610
611 flush_sndbuf_sync(priv);
612
613 spin_lock_bh(&priv->lock);
614 path = priv->path; /* save reference to IUCV path */
615 priv->path = NULL;
616 hvc_iucv_cleanup(priv);
617 spin_unlock_bh(&priv->lock);
618
619 /* sever IUCV path outside of priv->lock due to lock ordering of:
620 * priv->lock <--> iucv_table_lock */
621 if (path) {
622 iucv_path_sever(path, NULL);
623 iucv_path_free(path);
624 }
625 }
626
627 /**
628 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
629 * @ipvmid: Originating z/VM user ID (right padded with blanks)
630 *
631 * Returns 0 if the z/VM user ID @ipvmid is allowed to connect, otherwise
632 * non-zero.
633 */
634 static int hvc_iucv_filter_connreq(u8 ipvmid[8])
635 {
636 size_t i;
637
638 /* Note: default policy is ACCEPT if no filter is set */
639 if (!hvc_iucv_filter_size)
640 return 0;
641
642 for (i = 0; i < hvc_iucv_filter_size; i++)
643 if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
644 return 0;
645 return 1;
646 }
647
648 /**
649 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
650 * @path: Pending path (struct iucv_path)
651 * @ipvmid: z/VM system identifier of originator
652 * @ipuser: User specified data for this path
653 * (AF_IUCV: port/service name and originator port)
654 *
655 * The function uses the @ipuser data to determine if the pending path belongs
656 * to a terminal managed by this device driver.
657 * If the path belongs to this driver, ensure that the terminal is not accessed
658 * multiple times (only one connection to a terminal is allowed).
659 * If the terminal is not yet connected, the pending path is accepted and is
660 * associated with the appropriate struct hvc_iucv_private instance.
661 *
662 * Returns 0 if @path belongs to a terminal managed by this device driver;
663 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
664 *
665 * Locking: struct hvc_iucv_private->lock
666 */
667 static int hvc_iucv_path_pending(struct iucv_path *path,
668 u8 ipvmid[8], u8 ipuser[16])
669 {
670 struct hvc_iucv_private *priv;
671 u8 nuser_data[16];
672 u8 vm_user_id[9];
673 int i, rc;
674
675 priv = NULL;
676 for (i = 0; i < hvc_iucv_devices; i++)
677 if (hvc_iucv_table[i] &&
678 (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
679 priv = hvc_iucv_table[i];
680 break;
681 }
682 if (!priv)
683 return -ENODEV;
684
685 /* Enforce that ipvmid is allowed to connect to us */
686 read_lock(&hvc_iucv_filter_lock);
687 rc = hvc_iucv_filter_connreq(ipvmid);
688 read_unlock(&hvc_iucv_filter_lock);
689 if (rc) {
690 iucv_path_sever(path, ipuser);
691 iucv_path_free(path);
692 memcpy(vm_user_id, ipvmid, 8);
693 vm_user_id[8] = 0;
694 pr_info("A connection request from z/VM user ID %s "
695 "was refused\n", vm_user_id);
696 return 0;
697 }
698
699 spin_lock(&priv->lock);
700
701 /* If the terminal is already connected or being severed, then sever
702 * this path to enforce that there is only ONE established communication
703 * path per terminal. */
704 if (priv->iucv_state != IUCV_DISCONN) {
705 iucv_path_sever(path, ipuser);
706 iucv_path_free(path);
707 goto out_path_handled;
708 }
709
710 /* accept path */
711 memcpy(nuser_data, ipuser + 8, 8); /* remote service (for af_iucv) */
712 memcpy(nuser_data + 8, ipuser, 8); /* local service (for af_iucv) */
713 path->msglim = 0xffff; /* IUCV MSGLIMIT */
714 path->flags &= ~IUCV_IPRMDATA; /* TODO: use IUCV_IPRMDATA */
715 rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
716 if (rc) {
717 iucv_path_sever(path, ipuser);
718 iucv_path_free(path);
719 goto out_path_handled;
720 }
721 priv->path = path;
722 priv->iucv_state = IUCV_CONNECTED;
723
724 /* flush buffered output data... */
725 schedule_delayed_work(&priv->sndbuf_work, 5);
726
727 out_path_handled:
728 spin_unlock(&priv->lock);
729 return 0;
730 }
731
732 /**
733 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
734 * @path: Pending path (struct iucv_path)
735 * @ipuser: User specified data for this path
736 * (AF_IUCV: port/service name and originator port)
737 *
738 * The function severs the path (as required by the IUCV protocol) and
739 * sets the iucv state to IUCV_SEVERED for the associated struct
740 * hvc_iucv_private instance. Later, the IUCV_SEVERED state triggers a tty
741 * hangup (hvc_iucv_get_chars() / hvc_iucv_write()).
742 * If the tty portion of the HVC device is closed, clean up the outqueue.
743 *
744 * Locking: struct hvc_iucv_private->lock
745 */
746 static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
747 {
748 struct hvc_iucv_private *priv = path->private;
749
750 spin_lock(&priv->lock);
751 priv->iucv_state = IUCV_SEVERED;
752
753 /* If the tty has not yet been opened, clean up the hvc_iucv_private
754 * structure to allow re-connects.
755 * This is also done for our console device because console hangups
756 * are handled specially and no notifier is called by HVC.
757 * The tty session stays active (TTY_OPENED) and ready for re-connects...
758 *
759 * If it has been opened, let get_chars() return -EPIPE to signal the
760 * HVC layer to hang up the tty.
761 * If so, we need to wake up the HVC thread to call get_chars()...
762 */
763 priv->path = NULL;
764 if (priv->tty_state == TTY_CLOSED)
765 hvc_iucv_cleanup(priv);
766 else
767 if (priv->is_console) {
768 hvc_iucv_cleanup(priv);
769 priv->tty_state = TTY_OPENED;
770 } else
771 hvc_kick();
772 spin_unlock(&priv->lock);
773
774 /* finally sever path (outside of priv->lock due to lock ordering) */
775 iucv_path_sever(path, ipuser);
776 iucv_path_free(path);
777 }
778
779 /**
780 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
781 * @path: Pending path (struct iucv_path)
782 * @msg: Pointer to the IUCV message
783 *
784 * The function puts an incoming message on the input queue for later
785 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
786 * If the tty has not yet been opened, the message is rejected.
787 *
788 * Locking: struct hvc_iucv_private->lock
789 */
790 static void hvc_iucv_msg_pending(struct iucv_path *path,
791 struct iucv_message *msg)
792 {
793 struct hvc_iucv_private *priv = path->private;
794 struct iucv_tty_buffer *rb;
795
796 /* reject messages that exceed max size of iucv_tty_msg->datalen */
797 if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
798 iucv_message_reject(path, msg);
799 return;
800 }
801
802 spin_lock(&priv->lock);
803
804 /* reject messages if tty has not yet been opened */
805 if (priv->tty_state == TTY_CLOSED) {
806 iucv_message_reject(path, msg);
807 goto unlock_return;
808 }
809
810 /* allocate tty buffer to save iucv msg only */
811 rb = alloc_tty_buffer(0, GFP_ATOMIC);
812 if (!rb) {
813 iucv_message_reject(path, msg);
814 goto unlock_return; /* -ENOMEM */
815 }
816 rb->msg = *msg;
817
818 list_add_tail(&rb->list, &priv->tty_inqueue);
819
820 hvc_kick(); /* wake up hvc thread */
821
822 unlock_return:
823 spin_unlock(&priv->lock);
824 }
825
826 /**
827 * hvc_iucv_msg_complete() - IUCV handler to process message completion
828 * @path: Pending path (struct iucv_path)
829 * @msg: Pointer to the IUCV message
830 *
831 * The function is called upon completion of message delivery to remove the
832 * message from the outqueue. Additional delivery information can be found in
833 * msg->audit: rejected messages (0x040000 (IPADRJCT)), and
834 * purged messages (0x010000 (IPADPGNR)).
835 *
836 * Locking: struct hvc_iucv_private->lock
837 */
838 static void hvc_iucv_msg_complete(struct iucv_path *path,
839 struct iucv_message *msg)
840 {
841 struct hvc_iucv_private *priv = path->private;
842 struct iucv_tty_buffer *ent, *next;
843 LIST_HEAD(list_remove);
844
845 spin_lock(&priv->lock);
846 list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
847 if (ent->msg.id == msg->id) {
848 list_move(&ent->list, &list_remove);
849 break;
850 }
851 wake_up(&priv->sndbuf_waitq);
852 spin_unlock(&priv->lock);
853 destroy_tty_buffer_list(&list_remove);
854 }
855
856
857 /* HVC operations */
858 static struct hv_ops hvc_iucv_ops = {
859 .get_chars = hvc_iucv_get_chars,
860 .put_chars = hvc_iucv_put_chars,
861 .notifier_add = hvc_iucv_notifier_add,
862 .notifier_del = hvc_iucv_notifier_del,
863 .notifier_hangup = hvc_iucv_notifier_hangup,
864 };
865
866 /**
867 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
868 * @id: hvc_iucv_table index
869 * @is_console: Flag indicating whether the instance is used as the Linux console
870 *
871 * This function allocates a new hvc_iucv_private structure and stores
872 * the instance in hvc_iucv_table at index @id.
873 * Returns 0 on success; otherwise non-zero.
874 */
875 static int __init hvc_iucv_alloc(int id, unsigned int is_console)
876 {
877 struct hvc_iucv_private *priv;
878 char name[9];
879 int rc;
880
881 priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
882 if (!priv)
883 return -ENOMEM;
884
885 spin_lock_init(&priv->lock);
886 INIT_LIST_HEAD(&priv->tty_outqueue);
887 INIT_LIST_HEAD(&priv->tty_inqueue);
888 INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
889 init_waitqueue_head(&priv->sndbuf_waitq);
890
891 priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
892 if (!priv->sndbuf) {
893 kfree(priv);
894 return -ENOMEM;
895 }
896
897 /* set console flag */
898 priv->is_console = is_console;
899
900 /* finally allocate hvc */
901 priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /* PAGE_SIZE */
902 HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
903 if (IS_ERR(priv->hvc)) {
904 rc = PTR_ERR(priv->hvc);
905 free_page((unsigned long) priv->sndbuf);
906 kfree(priv);
907 return rc;
908 }
909
910 /* notify HVC thread instead of using polling */
911 priv->hvc->irq_requested = 1;
912
913 /* set up the IUCV service name ("lnxhvc<id>", blank-padded to 8 chars, converted to EBCDIC) */
914 snprintf(name, 9, "lnxhvc%-2d", id);
915 memcpy(priv->srv_name, name, 8);
916 ASCEBC(priv->srv_name, 8);
917
918 hvc_iucv_table[id] = priv;
919 return 0;
920 }
921
922 /**
923 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
924 * @filter: String containing a comma-separated list of z/VM user IDs
925 * @dest: Buffer (8 bytes) that receives the parsed user ID, blank-padded and uppercased */
926 static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
927 {
928 const char *nextdelim, *residual;
929 size_t len;
930
931 nextdelim = strchr(filter, ',');
932 if (nextdelim) {
933 len = nextdelim - filter;
934 residual = nextdelim + 1;
935 } else {
936 len = strlen(filter);
937 residual = filter + len;
938 }
939
940 if (len == 0)
941 return ERR_PTR(-EINVAL);
942
943 /* check for '\n' (if called from sysfs) */
944 if (filter[len - 1] == '\n')
945 len--;
946
947 if (len > 8)
948 return ERR_PTR(-EINVAL);
949
950 /* pad with blanks and save upper case version of user ID */
951 memset(dest, ' ', 8);
952 while (len--)
953 dest[len] = toupper(filter[len]);
954 return residual;
955 }
956
957 /**
958 * hvc_iucv_setup_filter() - Set up z/VM user ID filter
959 * @filter: String consisting of a comma-separated list of z/VM user IDs
960 *
961 * The function parses the @filter string and creates an array containing
962 * the list of z/VM user ID filter entries.
963 * Return code 0 means success, -EINVAL if the filter is syntactically
964 * incorrect, -ENOMEM if there was not enough memory to allocate the
965 * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
966 */
967 static int hvc_iucv_setup_filter(const char *val)
968 {
969 const char *residual;
970 int err;
971 size_t size, count;
972 void *array, *old_filter;
973
974 count = strlen(val);
975 if (count == 0 || (count == 1 && val[0] == '\n')) {
976 size = 0;
977 array = NULL;
978 goto out_replace_filter; /* clear filter */
979 }
980
981 /* count user IDs in order to allocate sufficient memory */
982 size = 1;
983 residual = val;
984 while ((residual = strchr(residual, ',')) != NULL) {
985 residual++;
986 size++;
987 }
988
989 /* check if the specified list exceeds the filter limit */
990 if (size > MAX_VMID_FILTER)
991 return -ENOSPC;
992
993 array = kzalloc(size * 8, GFP_KERNEL);
994 if (!array)
995 return -ENOMEM;
996
997 count = size;
998 residual = val;
999 while (*residual && count) {
1000 residual = hvc_iucv_parse_filter(residual,
1001 array + ((size - count) * 8));
1002 if (IS_ERR(residual)) {
1003 err = PTR_ERR(residual);
1004 kfree(array);
1005 goto out_err;
1006 }
1007 count--;
1008 }
1009
1010 out_replace_filter:
1011 write_lock_bh(&hvc_iucv_filter_lock);
1012 old_filter = hvc_iucv_filter;
1013 hvc_iucv_filter_size = size;
1014 hvc_iucv_filter = array;
1015 write_unlock_bh(&hvc_iucv_filter_lock);
1016 kfree(old_filter);
1017
1018 err = 0;
1019 out_err:
1020 return err;
1021 }
1022
1023 /**
1024 * param_set_vmidfilter() - Set z/VM user ID filter parameter
1025 * @val: String consisting of a comma-separated list of z/VM user IDs
1026 * @kp: Kernel parameter pointing to hvc_iucv_filter array
1027 *
1028 * The function sets up the z/VM user ID filter specified as comma-separated
1029 * list of user IDs in @val.
1030 * Note: If it is called early in the boot process, @val is stored and
1031 * parsed later in hvc_iucv_init().
1032 */
1033 static int param_set_vmidfilter(const char *val, struct kernel_param *kp)
1034 {
1035 int rc;
1036
1037 if (!MACHINE_IS_VM || !hvc_iucv_devices)
1038 return -ENODEV;
1039
1040 if (!val)
1041 return -EINVAL;
1042
1043 rc = 0;
1044 if (slab_is_available())
1045 rc = hvc_iucv_setup_filter(val);
1046 else
1047 hvc_iucv_filter_string = val; /* defer... */
1048 return rc;
1049 }
1050
1051 /**
1052 * param_get_vmidfilter() - Get z/VM user ID filter
1053 * @buffer: Buffer to store z/VM user ID filter,
1054 * (the buffer is assumed to be PAGE_SIZE bytes)
1055 * @kp: Kernel parameter pointing to the hvc_iucv_filter array
1056 *
1057 * The function stores the filter as a comma-separated list of z/VM user IDs
1058 * in @buffer. Typically, sysfs routines call this function for attr show.
1059 */
1060 static int param_get_vmidfilter(char *buffer, struct kernel_param *kp)
1061 {
1062 int rc;
1063 size_t index, len;
1064 void *start, *end;
1065
1066 if (!MACHINE_IS_VM || !hvc_iucv_devices)
1067 return -ENODEV;
1068
1069 rc = 0;
1070 read_lock_bh(&hvc_iucv_filter_lock);
1071 for (index = 0; index < hvc_iucv_filter_size; index++) {
1072 start = hvc_iucv_filter + (8 * index);
1073 end = memchr(start, ' ', 8);
1074 len = (end) ? end - start : 8;
1075 memcpy(buffer + rc, start, len);
1076 rc += len;
1077 buffer[rc++] = ',';
1078 }
1079 read_unlock_bh(&hvc_iucv_filter_lock);
1080 if (rc)
1081 buffer[--rc] = '\0'; /* replace last comma and update rc */
1082 return rc;
1083 }
1084
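/* The param_set_vmidfilter() / param_get_vmidfilter() routines above, together
 * with the param_check_vmidfilter() macro below, implement the custom
 * "vmidfilter" parameter type used by core_param(hvc_iucv_allow, ...) at the
 * end of this file. */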
1085 #define param_check_vmidfilter(name, p) __param_check(name, p, void)
1086
1087 /**
1088 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
1089 */
1090 static int __init hvc_iucv_init(void)
1091 {
1092 int rc;
1093 unsigned int i;
1094
1095 if (!hvc_iucv_devices)
1096 return -ENODEV;
1097
1098 if (!MACHINE_IS_VM) {
1099 pr_notice("The z/VM IUCV HVC device driver cannot "
1100 "be used without z/VM\n");
1101 rc = -ENODEV;
1102 goto out_error;
1103 }
1104
1105 if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
1106 pr_err("%lu is not a valid value for the hvc_iucv= "
1107 "kernel parameter\n", hvc_iucv_devices);
1108 rc = -EINVAL;
1109 goto out_error;
1110 }
1111
1112 /* parse hvc_iucv_allow string and create z/VM user ID filter list */
1113 if (hvc_iucv_filter_string) {
1114 rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
1115 switch (rc) {
1116 case 0:
1117 break;
1118 case -ENOMEM:
1119 pr_err("Allocating memory failed with "
1120 "reason code=%d\n", 3);
1121 goto out_error;
1122 case -EINVAL:
1123 pr_err("hvc_iucv_allow= does not specify a valid "
1124 "z/VM user ID list\n");
1125 goto out_error;
1126 case -ENOSPC:
1127 pr_err("hvc_iucv_allow= specifies too many "
1128 "z/VM user IDs\n");
1129 goto out_error;
1130 default:
1131 goto out_error;
1132 }
1133 }
1134
1135 hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
1136 sizeof(struct iucv_tty_buffer),
1137 0, 0, NULL);
1138 if (!hvc_iucv_buffer_cache) {
1139 pr_err("Allocating memory failed with reason code=%d\n", 1);
1140 rc = -ENOMEM;
1141 goto out_error;
1142 }
1143
1144 hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
1145 hvc_iucv_buffer_cache);
1146 if (!hvc_iucv_mempool) {
1147 pr_err("Allocating memory failed with reason code=%d\n", 2);
1148 kmem_cache_destroy(hvc_iucv_buffer_cache);
1149 rc = -ENOMEM;
1150 goto out_error;
1151 }
1152
1153 /* register the first terminal device as console
1154 * (must be done before allocating hvc terminal devices) */
1155 rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
1156 if (rc) {
1157 pr_err("Registering HVC terminal device as "
1158 "Linux console failed\n");
1159 goto out_error_memory;
1160 }
1161
1162 /* allocate hvc_iucv_private structs */
1163 for (i = 0; i < hvc_iucv_devices; i++) {
1164 rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
1165 if (rc) {
1166 pr_err("Creating a new HVC terminal device "
1167 "failed with error code=%d\n", rc);
1168 goto out_error_hvc;
1169 }
1170 }
1171
1172 /* register IUCV callback handler */
1173 rc = iucv_register(&hvc_iucv_handler, 0);
1174 if (rc) {
1175 pr_err("Registering IUCV handlers failed with error code=%d\n",
1176 rc);
1177 goto out_error_iucv;
1178 }
1179
1180 return 0;
1181
1182 out_error_iucv:
1183 iucv_unregister(&hvc_iucv_handler, 0);
1184 out_error_hvc:
1185 for (i = 0; i < hvc_iucv_devices; i++)
1186 if (hvc_iucv_table[i]) {
1187 if (hvc_iucv_table[i]->hvc)
1188 hvc_remove(hvc_iucv_table[i]->hvc);
1189 kfree(hvc_iucv_table[i]);
1190 }
1191 out_error_memory:
1192 mempool_destroy(hvc_iucv_mempool);
1193 kmem_cache_destroy(hvc_iucv_buffer_cache);
1194 out_error:
1195 hvc_iucv_devices = 0; /* ensure that we do not provide any device */
1196 return rc;
1197 }
1198
1199 /**
1200 * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
1201 * @val: Parameter value (numeric)
1202 */
1203 static int __init hvc_iucv_config(char *val)
1204 {
1205 return strict_strtoul(val, 10, &hvc_iucv_devices);
1206 }
1207
1208
1209 device_initcall(hvc_iucv_init);
1210 __setup("hvc_iucv=", hvc_iucv_config);
1211 core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
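/*
 * Usage example (kernel command line; the device count and z/VM user IDs are
 * illustrative only):
 *   hvc_iucv=4 hvc_iucv_allow=lxtcp01,lxtcp02
 * This activates four z/VM IUCV HVC terminal devices and restricts incoming
 * IUCV connections to the z/VM user IDs LXTCP01 and LXTCP02.
 */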