/*
 * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver
 *
 * This HVC device driver provides terminal access using
 * z/VM IUCV communication paths.
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * Author(s):	Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#define KMSG_COMPONENT		"hvc_iucv"
#define pr_fmt(fmt)		KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/moduleparam.h>
#include <linux/tty.h>
#include <linux/wait.h>
#include <net/iucv/iucv.h>

#include "hvc_console.h"


/* General device driver settings */
#define HVC_IUCV_MAGIC		0xc9e4c3e5
#define MAX_HVC_IUCV_LINES	HVC_ALLOC_TTY_ADAPTERS
#define MEMPOOL_MIN_NR		(PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)

/* IUCV TTY message */
#define MSG_VERSION		0x02	/* Message version */
#define MSG_TYPE_ERROR		0x01	/* Error message */
#define MSG_TYPE_TERMENV	0x02	/* Terminal environment variable */
#define MSG_TYPE_TERMIOS	0x04	/* Terminal IO struct update */
#define MSG_TYPE_WINSIZE	0x08	/* Terminal window size update */
#define MSG_TYPE_DATA		0x10	/* Terminal data */

struct iucv_tty_msg {
	u8	version;		/* Message version */
	u8	type;			/* Message type */
#define MSG_MAX_DATALEN		((u16)(~0))
	u16	datalen;		/* Payload length */
	u8	data[];			/* Payload buffer */
} __attribute__((packed));
#define MSG_SIZE(s)		((s) + offsetof(struct iucv_tty_msg, data))

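/*
 * Reading aid (added for illustration, not part of the protocol definition
 * above): the packed header consists of version (1 byte), type (1 byte) and
 * datalen (2 bytes), so a message carrying N data bytes occupies
 * MSG_SIZE(N) = N + 4 bytes, e.g. MSG_SIZE(10) == 14.
 */
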
enum iucv_state_t {
	IUCV_DISCONN	= 0,
	IUCV_CONNECTED	= 1,
	IUCV_SEVERED	= 2,
};

enum tty_state_t {
	TTY_CLOSED	= 0,
	TTY_OPENED	= 1,
};

struct hvc_iucv_private {
	struct hvc_struct	*hvc;		/* HVC struct reference */
	u8			srv_name[8];	/* IUCV service name (ebcdic) */
	unsigned char		is_console;	/* Linux console usage flag */
	enum iucv_state_t	iucv_state;	/* IUCV connection status */
	enum tty_state_t	tty_state;	/* TTY status */
	struct iucv_path	*path;		/* IUCV path pointer */
	spinlock_t		lock;		/* hvc_iucv_private lock */
#define SNDBUF_SIZE		(PAGE_SIZE)	/* must be < MSG_MAX_DATALEN */
	void			*sndbuf;	/* send buffer */
	size_t			sndbuf_len;	/* length of send buffer */
#define QUEUE_SNDBUF_DELAY	(HZ / 25)
	struct delayed_work	sndbuf_work;	/* work: send iucv msg(s) */
	wait_queue_head_t	sndbuf_waitq;	/* wait for send completion */
	struct list_head	tty_outqueue;	/* outgoing IUCV messages */
	struct list_head	tty_inqueue;	/* incoming IUCV messages */
	struct device		*dev;		/* device structure */
};
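
/*
 * Sizing note (added as a reading aid): SNDBUF_SIZE is one page and thus
 * well below MSG_MAX_DATALEN (65535), so a completely filled send buffer
 * always fits into a single iucv_tty_msg.
 */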

struct iucv_tty_buffer {
	struct list_head	list;	/* list pointer */
	struct iucv_message	msg;	/* store an IUCV message */
	size_t			offset;	/* data buffer offset */
	struct iucv_tty_msg	*mbuf;	/* buffer to store input/output data */
};

/* IUCV callback handler */
static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);


/* Kernel module parameter: use one terminal device as default */
static unsigned long hvc_iucv_devices = 1;

/* Array of allocated hvc iucv tty lines... */
static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
#define IUCV_HVC_CON_IDX	(0)
/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
#define MAX_VMID_FILTER		(500)
static size_t hvc_iucv_filter_size;
static void *hvc_iucv_filter;
static const char *hvc_iucv_filter_string;
static DEFINE_RWLOCK(hvc_iucv_filter_lock);

/* Kmem cache and mempool for iucv_tty_buffer elements */
static struct kmem_cache *hvc_iucv_buffer_cache;
static mempool_t *hvc_iucv_mempool;

/* IUCV handler callback functions */
static struct iucv_handler hvc_iucv_handler = {
	.path_pending		= hvc_iucv_path_pending,
	.path_severed		= hvc_iucv_path_severed,
	.message_complete	= hvc_iucv_msg_complete,
	.message_pending	= hvc_iucv_msg_pending,
};


/**
 * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
 * @num:	The HVC virtual terminal number (vtermno)
 *
 * This function returns the struct hvc_iucv_private instance that corresponds
 * to the HVC virtual terminal number specified as parameter @num.
 */
struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
{
	if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
		return NULL;
	return hvc_iucv_table[num - HVC_IUCV_MAGIC];
}
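
/*
 * Example (illustrative): with the default of hvc_iucv=1, the only terminal
 * has vtermno HVC_IUCV_MAGIC + 0 and maps to hvc_iucv_table[0]; any other
 * vtermno makes hvc_iucv_get_private() return NULL.
 */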

/**
 * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
 * @size:	Size of the internal buffer used to store data.
 * @flags:	Memory allocation flags passed to mempool.
 *
 * This function allocates a new struct iucv_tty_buffer element and, optionally,
 * allocates an internal data buffer with the specified size @size.
 * The internal data buffer is always allocated with GFP_DMA which is
 * required for receiving and sending data with IUCV.
 * Note: The total message size arises from the internal buffer size and the
 * members of the iucv_tty_msg structure.
 * The function returns NULL if memory allocation has failed.
 */
static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
{
	struct iucv_tty_buffer *bufp;

	bufp = mempool_alloc(hvc_iucv_mempool, flags);
	if (!bufp)
		return NULL;
	memset(bufp, 0, sizeof(*bufp));

	if (size > 0) {
		bufp->msg.length = MSG_SIZE(size);
		bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
		if (!bufp->mbuf) {
			mempool_free(bufp, hvc_iucv_mempool);
			return NULL;
		}
		bufp->mbuf->version = MSG_VERSION;
		bufp->mbuf->type = MSG_TYPE_DATA;
		bufp->mbuf->datalen = (u16) size;
	}
	return bufp;
}

/**
 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
 * @bufp:	Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
 */
static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
{
	kfree(bufp->mbuf);
	mempool_free(bufp, hvc_iucv_mempool);
}

/**
 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
 * @list:	List containing struct iucv_tty_buffer elements.
 */
static void destroy_tty_buffer_list(struct list_head *list)
{
	struct iucv_tty_buffer *ent, *next;

	list_for_each_entry_safe(ent, next, list, list) {
		list_del(&ent->list);
		destroy_tty_buffer(ent);
	}
}

/**
 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
 * @priv:		Pointer to struct hvc_iucv_private
 * @buf:		HVC buffer for writing received terminal data.
 * @count:		HVC buffer size.
 * @has_more_data:	Pointer to an int variable.
 *
 * The function picks up pending messages from the input queue and receives
 * the message data that is then written to the specified buffer @buf.
 * If the buffer size @count is less than the data message size, the
 * message is kept on the input queue and @has_more_data is set to 1.
 * If all message data has been written, the message is removed from
 * the input queue.
 *
 * The function returns the number of bytes written to the terminal, zero if
 * there are no pending data messages available or if there is no established
 * IUCV path.
 * If the IUCV path has been severed, then -EPIPE is returned to cause a
 * hang up (that is issued by the HVC layer).
 */
static int hvc_iucv_write(struct hvc_iucv_private *priv,
			  char *buf, int count, int *has_more_data)
{
	struct iucv_tty_buffer *rb;
	int written;
	int rc;

	/* immediately return if there is no IUCV connection */
	if (priv->iucv_state == IUCV_DISCONN)
		return 0;

	/* if the IUCV path has been severed, return -EPIPE to inform the
	 * HVC layer to hang up the tty device. */
	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	/* check if there are pending messages */
	if (list_empty(&priv->tty_inqueue))
		return 0;

	/* receive an iucv message and flip data to the tty (ldisc) */
	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);

	written = 0;
	if (!rb->mbuf) { /* message not yet received ... */
		/* allocate mem to store msg data; if no memory is available
		 * then leave the buffer on the list and re-try later */
		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
		if (!rb->mbuf)
			return -ENOMEM;

		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
					    rb->mbuf, rb->msg.length, NULL);
		switch (rc) {
		case 0: /* Successful */
			break;
		case 2:	/* No message found */
		case 9: /* Message purged */
			break;
		default:
			written = -EIO;
		}
		/* remove buffer if an error has occurred or received data
		 * is not correct */
		if (rc || (rb->mbuf->version != MSG_VERSION) ||
			  (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
			goto out_remove_buffer;
	}

	switch (rb->mbuf->type) {
	case MSG_TYPE_DATA:
		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
		memcpy(buf, rb->mbuf->data + rb->offset, written);
		if (written < (rb->mbuf->datalen - rb->offset)) {
			rb->offset += written;
			*has_more_data = 1;
			goto out_written;
		}
		break;

	case MSG_TYPE_WINSIZE:
		if (rb->mbuf->datalen != sizeof(struct winsize))
			break;
		/* The caller must ensure that the hvc is locked, which
		 * is the case when called from hvc_iucv_get_chars() */
		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
		break;

	case MSG_TYPE_ERROR:	/* ignored ... */
	case MSG_TYPE_TERMENV:	/* ignored ... */
	case MSG_TYPE_TERMIOS:	/* ignored ... */
		break;
	}

out_remove_buffer:
	list_del(&rb->list);
	destroy_tty_buffer(rb);
	*has_more_data = !list_empty(&priv->tty_inqueue);

out_written:
	return written;
}

/**
 * hvc_iucv_get_chars() - HVC get_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to store data
 * @count:	Size of buffer available for writing
 *
 * The HVC thread calls this method to read characters from the back-end.
 * If an IUCV communication path has been established, pending IUCV messages
 * are received and data is copied into buffer @buf up to @count bytes.
 *
 * Locking:	The routine gets called under an irqsave() spinlock; and
 *		the routine locks the struct hvc_iucv_private->lock to call
 *		helper functions.
 */
static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
{
	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
	int written;
	int has_more_data;

	if (count <= 0)
		return 0;

	if (!priv)
		return -ENODEV;

	spin_lock(&priv->lock);
	has_more_data = 0;
	written = hvc_iucv_write(priv, buf, count, &has_more_data);
	spin_unlock(&priv->lock);

	/* if there are still messages on the queue... schedule another run */
	if (has_more_data)
		hvc_kick();

	return written;
}

/**
 * hvc_iucv_queue() - Buffer terminal data for sending.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 * @buf:	Buffer containing data to send.
 * @count:	Size of buffer and amount of data to send.
 *
 * The function queues data for sending. To actually send the buffered data,
 * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
 * The function returns the number of data bytes that have been buffered.
 *
 * If the device is not connected, data is ignored and the function returns
 * @count.
 * If the buffer is full, the function returns 0.
 * If an existing IUCV communication path has been severed, -EPIPE is returned
 * (that can be passed to the HVC layer to cause a tty hangup).
 */
static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
			  int count)
{
	size_t len;

	if (priv->iucv_state == IUCV_DISCONN)
		return count;			/* ignore data */

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
	if (!len)
		return 0;

	memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
	priv->sndbuf_len += len;

	if (priv->iucv_state == IUCV_CONNECTED)
		schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);

	return len;
}

/**
 * hvc_iucv_send() - Send an IUCV message containing terminal data.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * If an IUCV communication path has been established, the buffered output data
 * is sent via an IUCV message and the number of bytes sent is returned.
 * Returns -EIO if there is no established IUCV communication path or
 * -EPIPE if an existing IUCV communication path has been severed.
 */
static int hvc_iucv_send(struct hvc_iucv_private *priv)
{
	struct iucv_tty_buffer *sb;
	int rc, len;

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	if (priv->iucv_state == IUCV_DISCONN)
		return -EIO;

	if (!priv->sndbuf_len)
		return 0;

	/* allocate internal buffer to store msg data and also compute total
	 * message length */
	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
	if (!sb)
		return -ENOMEM;

	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
	sb->mbuf->datalen = (u16) priv->sndbuf_len;
	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);

	list_add_tail(&sb->list, &priv->tty_outqueue);

	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
				 (void *) sb->mbuf, sb->msg.length);
	if (rc) {
		/* drop the message here; however we might want to handle
		 * 0x03 (msg limit reached) by trying again... */
		list_del(&sb->list);
		destroy_tty_buffer(sb);
	}
	len = priv->sndbuf_len;
	priv->sndbuf_len = 0;

	return len;
}

/**
 * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
 * @work:	Work structure.
 *
 * This work queue function sends buffered output data over IUCV and,
 * if not all buffered data could be sent, reschedules itself.
 */
static void hvc_iucv_sndbuf_work(struct work_struct *work)
{
	struct hvc_iucv_private *priv;

	priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
	if (!priv)
		return;

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_put_chars() - HVC put_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to read data from
 * @count:	Size of buffer available for reading
 *
 * The HVC thread calls this method to write characters to the back-end.
 * The function calls hvc_iucv_queue() to queue terminal data for sending.
 *
 * Locking:	The method gets called under an irqsave() spinlock; and
 *		locks struct hvc_iucv_private->lock.
 */
static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
{
	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
	int queued;

	if (count <= 0)
		return 0;

	if (!priv)
		return -ENODEV;

	spin_lock(&priv->lock);
	queued = hvc_iucv_queue(priv, buf, count);
	spin_unlock(&priv->lock);

	return queued;
}

/**
 * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * The function sets the tty state to TTY_OPENED for the struct
 * hvc_iucv_private instance that is derived from @id. Always returns 0.
 *
 * Locking:	struct hvc_iucv_private->lock, spin_lock_bh
 */
static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return 0;

	spin_lock_bh(&priv->lock);
	priv->tty_state = TTY_OPENED;
	spin_unlock_bh(&priv->lock);

	return 0;
}

/**
 * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
 * @priv:	Pointer to the struct hvc_iucv_private instance.
 */
static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
{
	destroy_tty_buffer_list(&priv->tty_outqueue);
	destroy_tty_buffer_list(&priv->tty_inqueue);

	priv->tty_state = TTY_CLOSED;
	priv->iucv_state = IUCV_DISCONN;

	priv->sndbuf_len = 0;
}

/**
 * tty_outqueue_empty() - Test if the tty outq is empty
 * @priv:	Pointer to struct hvc_iucv_private instance.
 */
static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
{
	int rc;

	spin_lock_bh(&priv->lock);
	rc = list_empty(&priv->tty_outqueue);
	spin_unlock_bh(&priv->lock);

	return rc;
}

/**
 * flush_sndbuf_sync() - Flush send buffer and wait for completion
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
 * to flush any buffered terminal output data and waits for completion.
 */
static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
{
	int sync_wait;

	cancel_delayed_work_sync(&priv->sndbuf_work);

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);			/* force sending buffered data */
	sync_wait = !list_empty(&priv->tty_outqueue);	/* anything queued ? */
	spin_unlock_bh(&priv->lock);

	if (sync_wait)
		wait_event_timeout(priv->sndbuf_waitq,
				   tty_outqueue_empty(priv), HZ/10);
}

/**
 * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
 * @priv:	Pointer to hvc_iucv_private structure
 *
 * This routine severs an existing IUCV communication path and hangs
 * up the underlying HVC terminal device.
 * The hang-up occurs only if an IUCV communication path is established;
 * otherwise there is no need to hang up the terminal device.
 *
 * The IUCV HVC hang-up is separated into two steps:
 * 1. After the IUCV path has been severed, the iucv_state is set to
 *    IUCV_SEVERED.
 * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
 *    IUCV_SEVERED state causes the tty hang-up in the HVC layer.
 *
 * If the tty has not yet been opened, clean up the hvc_iucv_private
 * structure to allow re-connects.
 * If the tty has been opened, let get_chars() return -EPIPE to signal
 * the HVC layer to hang up the tty and, if so, wake up the HVC thread
 * to call get_chars()...
 *
 * Special notes on hanging up a HVC terminal instantiated as console:
 * Hang-up:	1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
 *		2. do_tty_hangup() calls tty->ops->close() for console_filp
 *		   => no hangup notifier is called by HVC (default)
 *		3. hvc_close() returns because of tty_hung_up_p(filp)
 *		   => no delete notifier is called!
 * Finally, the back-end is not being notified, thus, the tty session is
 * kept active (TTY_OPEN) to be ready for re-connects.
 *
 * Locking:	spin_lock(&priv->lock) w/o disabling bh
 */
static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
{
	struct iucv_path *path;

	path = NULL;
	spin_lock(&priv->lock);
	if (priv->iucv_state == IUCV_CONNECTED) {
		path = priv->path;
		priv->path = NULL;
		priv->iucv_state = IUCV_SEVERED;
		if (priv->tty_state == TTY_CLOSED)
			hvc_iucv_cleanup(priv);
		else
			/* console is special (see above) */
			if (priv->is_console) {
				hvc_iucv_cleanup(priv);
				priv->tty_state = TTY_OPENED;
			} else
				hvc_kick();
	}
	spin_unlock(&priv->lock);

	/* finally sever path (outside of priv->lock due to lock ordering) */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/**
 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
 * virtual or otherwise) has occurred.
 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
 * to keep an existing IUCV communication path established.
 * (Background: vhangup() is called from user space (by getty or login) to
 *  disable writing to the tty by other applications).
 * If the tty has been opened and an established IUCV path has been severed
 * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	/* NOTE: If the hangup was scheduled by ourselves (from the iucv
	 *	 path_severed callback [IUCV_SEVERED]), we have to clean up
	 *	 our structure and to set state to TTY_CLOSED.
	 *	 If the tty was hung up otherwise (e.g. vhangup()), then we
	 *	 ignore this hangup and keep an established IUCV path open...
	 *	 (...the reason is that we are not able to connect back to the
	 *	 client if we disconnect on hang up) */
	priv->tty_state = TTY_CLOSED;

	if (priv->iucv_state == IUCV_SEVERED)
		hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_dtr_rts() - HVC notifier for handling DTR/RTS
 * @hp:		Pointer to the HVC device (struct hvc_struct)
 * @raise:	Non-zero to raise or zero to lower DTR/RTS lines
 *
 * This routine notifies the HVC back-end to raise or lower DTR/RTS
 * lines. Raising DTR/RTS is ignored. Lowering DTR/RTS indicates to
 * drop the IUCV connection (similar to hanging up a modem).
 */
static void hvc_iucv_dtr_rts(struct hvc_struct *hp, int raise)
{
	struct hvc_iucv_private *priv;
	struct iucv_path *path;

	/* Raising the DTR/RTS is ignored as IUCV connections can be
	 * established at any time.
	 */
	if (raise)
		return;

	priv = hvc_iucv_get_private(hp->vtermno);
	if (!priv)
		return;

	/* Lowering the DTR/RTS lines disconnects an established IUCV
	 * connection.
	 */
	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	path = priv->path;		/* save reference to IUCV path */
	priv->path = NULL;
	priv->iucv_state = IUCV_DISCONN;
	spin_unlock_bh(&priv->lock);

	/* Sever IUCV path outside of priv->lock due to lock ordering of:
	 * priv->lock <--> iucv_table_lock */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/**
 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that the last tty device fd has been
 * closed. The function cleans up tty resources. The clean-up of the IUCV
 * connection is done in hvc_iucv_dtr_rts() and depends on the HUPCL termios
 * control setting.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	destroy_tty_buffer_list(&priv->tty_outqueue);
	destroy_tty_buffer_list(&priv->tty_inqueue);
	priv->tty_state = TTY_CLOSED;
	priv->sndbuf_len = 0;
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
 * @ipvmid:	Originating z/VM user ID (right padded with blanks)
 *
 * Returns 0 if the z/VM user ID @ipvmid is allowed to connect, otherwise
 * non-zero.
 */
static int hvc_iucv_filter_connreq(u8 ipvmid[8])
{
	size_t i;

	/* Note: default policy is ACCEPT if no filter is set */
	if (!hvc_iucv_filter_size)
		return 0;

	for (i = 0; i < hvc_iucv_filter_size; i++)
		if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
			return 0;
	return 1;
}

/**
 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 * @path:	Pending path (struct iucv_path)
 * @ipvmid:	z/VM system identifier of originator
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function uses the @ipuser data to determine if the pending path belongs
 * to a terminal managed by this device driver.
 * If the path belongs to this driver, ensure that the terminal is not accessed
 * multiple times (only one connection to a terminal is allowed).
 * If the terminal is not yet connected, the pending path is accepted and is
 * associated to the appropriate struct hvc_iucv_private instance.
 *
 * Returns 0 if @path belongs to a terminal managed by this device driver;
 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static int hvc_iucv_path_pending(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	struct hvc_iucv_private *priv;
	u8 nuser_data[16];
	u8 vm_user_id[9];
	int i, rc;

	priv = NULL;
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i] &&
		    (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
			priv = hvc_iucv_table[i];
			break;
		}
	if (!priv)
		return -ENODEV;

	/* Enforce that ipvmid is allowed to connect to us */
	read_lock(&hvc_iucv_filter_lock);
	rc = hvc_iucv_filter_connreq(ipvmid);
	read_unlock(&hvc_iucv_filter_lock);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		memcpy(vm_user_id, ipvmid, 8);
		vm_user_id[8] = 0;
		pr_info("A connection request from z/VM user ID %s "
			"was refused\n", vm_user_id);
		return 0;
	}

	spin_lock(&priv->lock);

	/* If the terminal is already connected or being severed, then sever
	 * this path to enforce that there is only ONE established communication
	 * path per terminal. */
	if (priv->iucv_state != IUCV_DISCONN) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}

	/* accept path */
	memcpy(nuser_data, ipuser + 8, 8);  /* remote service (for af_iucv) */
	memcpy(nuser_data + 8, ipuser, 8);  /* local service  (for af_iucv) */
	path->msglim = 0xffff;		    /* IUCV MSGLIMIT  */
	path->flags &= ~IUCV_IPRMDATA;	    /* TODO: use IUCV_IPRMDATA */
	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}
	priv->path = path;
	priv->iucv_state = IUCV_CONNECTED;

	/* flush buffered output data... */
	schedule_delayed_work(&priv->sndbuf_work, 5);

out_path_handled:
	spin_unlock(&priv->lock);
	return 0;
}

/**
 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 * @path:	Pending path (struct iucv_path)
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * This function calls the hvc_iucv_hangup() function for the
 * respective IUCV HVC terminal.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	struct hvc_iucv_private *priv = path->private;

	hvc_iucv_hangup(priv);
}

/**
 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function puts an incoming message on the input queue for later
 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
 * If the tty has not yet been opened, the message is rejected.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *rb;

	/* reject messages that exceed max size of iucv_tty_msg->datalen */
	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&priv->lock);

	/* reject messages if tty has not yet been opened */
	if (priv->tty_state == TTY_CLOSED) {
		iucv_message_reject(path, msg);
		goto unlock_return;
	}

	/* allocate tty buffer to save iucv msg only */
	rb = alloc_tty_buffer(0, GFP_ATOMIC);
	if (!rb) {
		iucv_message_reject(path, msg);
		goto unlock_return;	/* -ENOMEM */
	}
	rb->msg = *msg;

	list_add_tail(&rb->list, &priv->tty_inqueue);

	hvc_kick();	/* wake up hvc thread */

unlock_return:
	spin_unlock(&priv->lock);
}

/**
 * hvc_iucv_msg_complete() - IUCV handler to process message completion
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function is called upon completion of message delivery to remove the
 * message from the outqueue. Additional delivery information can be found
 * in msg->audit: rejected messages (0x040000 (IPADRJCT)), and
 * purged messages (0x010000 (IPADPGNR)).
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_complete(struct iucv_path *path,
				  struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer	*ent, *next;
	LIST_HEAD(list_remove);

	spin_lock(&priv->lock);
	list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
		if (ent->msg.id == msg->id) {
			list_move(&ent->list, &list_remove);
			break;
		}
	wake_up(&priv->sndbuf_waitq);
	spin_unlock(&priv->lock);
	destroy_tty_buffer_list(&list_remove);
}

/**
 * hvc_iucv_pm_freeze() - Freeze PM callback
 * @dev:	IUCV HVC terminal device
 *
 * Sever an established IUCV communication path and
 * trigger a hang-up of the underlying HVC terminal.
 */
static int hvc_iucv_pm_freeze(struct device *dev)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);

	local_bh_disable();
	hvc_iucv_hangup(priv);
	local_bh_enable();

	return 0;
}

/**
 * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	IUCV HVC terminal device
 *
 * Wake up the HVC thread to trigger hang-up and respective
 * HVC back-end notifier invocations.
 */
static int hvc_iucv_pm_restore_thaw(struct device *dev)
{
	hvc_kick();
	return 0;
}


/* HVC operations */
static const struct hv_ops hvc_iucv_ops = {
	.get_chars = hvc_iucv_get_chars,
	.put_chars = hvc_iucv_put_chars,
	.notifier_add = hvc_iucv_notifier_add,
	.notifier_del = hvc_iucv_notifier_del,
	.notifier_hangup = hvc_iucv_notifier_hangup,
	.dtr_rts = hvc_iucv_dtr_rts,
};

/* Suspend / resume device operations */
static const struct dev_pm_ops hvc_iucv_pm_ops = {
	.freeze	  = hvc_iucv_pm_freeze,
	.thaw	  = hvc_iucv_pm_restore_thaw,
	.restore  = hvc_iucv_pm_restore_thaw,
};

/* IUCV HVC device driver */
static struct device_driver hvc_iucv_driver = {
	.name = KMSG_COMPONENT,
	.bus  = &iucv_bus,
	.pm   = &hvc_iucv_pm_ops,
};

/**
 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
 * @id:			hvc_iucv_table index
 * @is_console:		Flag if the instance is used as Linux console
 *
 * This function allocates a new hvc_iucv_private structure and stores
 * the instance in hvc_iucv_table at index @id.
 * Returns 0 on success; otherwise non-zero.
 */
static int __init hvc_iucv_alloc(int id, unsigned int is_console)
{
	struct hvc_iucv_private *priv;
	char name[9];
	int rc;

	priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->lock);
	INIT_LIST_HEAD(&priv->tty_outqueue);
	INIT_LIST_HEAD(&priv->tty_inqueue);
	INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
	init_waitqueue_head(&priv->sndbuf_waitq);

	priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!priv->sndbuf) {
		kfree(priv);
		return -ENOMEM;
	}

	/* set console flag */
	priv->is_console = is_console;

	/* allocate hvc device */
	priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /* PAGE_SIZE */
			      HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
	if (IS_ERR(priv->hvc)) {
		rc = PTR_ERR(priv->hvc);
		goto out_error_hvc;
	}

	/* notify HVC thread instead of using polling */
	priv->hvc->irq_requested = 1;

	/* setup iucv related information */
	snprintf(name, 9, "lnxhvc%-2d", id);
	memcpy(priv->srv_name, name, 8);
	ASCEBC(priv->srv_name, 8);

	/* create and setup device */
	priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
	if (!priv->dev) {
		rc = -ENOMEM;
		goto out_error_dev;
	}
	dev_set_name(priv->dev, "hvc_iucv%d", id);
	dev_set_drvdata(priv->dev, priv);
	priv->dev->bus = &iucv_bus;
	priv->dev->parent = iucv_root;
	priv->dev->driver = &hvc_iucv_driver;
	priv->dev->release = (void (*)(struct device *)) kfree;
	rc = device_register(priv->dev);
	if (rc) {
		put_device(priv->dev);
		goto out_error_dev;
	}

	hvc_iucv_table[id] = priv;
	return 0;

out_error_dev:
	hvc_remove(priv->hvc);
out_error_hvc:
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);

	return rc;
}

/**
 * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances
 * @priv:	Pointer to the struct hvc_iucv_private instance to free
 */
static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
{
	hvc_remove(priv->hvc);
	device_unregister(priv->dev);
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);
}

/**
 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
 * @filter:	String containing a comma-separated list of z/VM user IDs
 * @dest:	Location where to store the parsed z/VM user ID
 */
static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
{
	const char *nextdelim, *residual;
	size_t len;

	nextdelim = strchr(filter, ',');
	if (nextdelim) {
		len = nextdelim - filter;
		residual = nextdelim + 1;
	} else {
		len = strlen(filter);
		residual = filter + len;
	}

	if (len == 0)
		return ERR_PTR(-EINVAL);

	/* check for '\n' (if called from sysfs) */
	if (filter[len - 1] == '\n')
		len--;

	if (len > 8)
		return ERR_PTR(-EINVAL);

	/* pad with blanks and save upper case version of user ID */
	memset(dest, ' ', 8);
	while (len--)
		dest[len] = toupper(filter[len]);
	return residual;
}
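
/*
 * Example (illustrative, the user IDs are made up): parsing the sysfs input
 * "lnxsys01,op1\n" entry by entry yields the 8-byte filter entries
 * "LNXSYS01" and "OP1     " (upper-cased and blank-padded), which matches
 * the ipvmid format checked by hvc_iucv_filter_connreq().
 */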

/**
 * hvc_iucv_setup_filter() - Set up z/VM user ID filter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 *
 * The function parses the @val string and creates an array containing
 * the list of z/VM user ID filter entries.
 * Return code 0 means success, -EINVAL if the filter is syntactically
 * incorrect, -ENOMEM if there was not enough memory to allocate the
 * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
 */
static int hvc_iucv_setup_filter(const char *val)
{
	const char *residual;
	int err;
	size_t size, count;
	void *array, *old_filter;

	count = strlen(val);
	if (count == 0 || (count == 1 && val[0] == '\n')) {
		size  = 0;
		array = NULL;
		goto out_replace_filter;	/* clear filter */
	}

	/* count user IDs in order to allocate sufficient memory */
	size = 1;
	residual = val;
	while ((residual = strchr(residual, ',')) != NULL) {
		residual++;
		size++;
	}

	/* check if the specified list exceeds the filter limit */
	if (size > MAX_VMID_FILTER)
		return -ENOSPC;

	array = kzalloc(size * 8, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	count = size;
	residual = val;
	while (*residual && count) {
		residual = hvc_iucv_parse_filter(residual,
						 array + ((size - count) * 8));
		if (IS_ERR(residual)) {
			err = PTR_ERR(residual);
			kfree(array);
			goto out_err;
		}
		count--;
	}

out_replace_filter:
	write_lock_bh(&hvc_iucv_filter_lock);
	old_filter = hvc_iucv_filter;
	hvc_iucv_filter_size = size;
	hvc_iucv_filter = array;
	write_unlock_bh(&hvc_iucv_filter_lock);
	kfree(old_filter);

	err = 0;
out_err:
	return err;
}

/**
 * param_set_vmidfilter() - Set z/VM user ID filter parameter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 * @kp:		Kernel parameter pointing to the hvc_iucv_filter array
 *
 * The function sets up the z/VM user ID filter specified as comma-separated
 * list of user IDs in @val.
 * Note: If it is called early in the boot process, @val is stored and
 * parsed later in hvc_iucv_init().
 */
static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
{
	int rc;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	if (!val)
		return -EINVAL;

	rc = 0;
	if (slab_is_available())
		rc = hvc_iucv_setup_filter(val);
	else
		hvc_iucv_filter_string = val;	/* defer... */
	return rc;
}

/**
 * param_get_vmidfilter() - Get z/VM user ID filter
 * @buffer:	Buffer to store z/VM user ID filter,
 *		(buffer size assumption PAGE_SIZE)
 * @kp:		Kernel parameter pointing to the hvc_iucv_filter array
 *
 * The function stores the filter as a comma-separated list of z/VM user IDs
 * in @buffer. Typically, sysfs routines call this function for attr show.
 */
static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
{
	int rc;
	size_t index, len;
	void *start, *end;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	rc = 0;
	read_lock_bh(&hvc_iucv_filter_lock);
	for (index = 0; index < hvc_iucv_filter_size; index++) {
		start = hvc_iucv_filter + (8 * index);
		end   = memchr(start, ' ', 8);
		len   = (end) ? end - start : 8;
		memcpy(buffer + rc, start, len);
		rc += len;
		buffer[rc++] = ',';
	}
	read_unlock_bh(&hvc_iucv_filter_lock);
	if (rc)
		buffer[--rc] = '\0';	/* replace last comma and update rc */
	return rc;
}
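
/*
 * Example (illustrative): for the filter entries "LNXSYS01" and "OP1     ",
 * reading the parameter returns the string "LNXSYS01,OP1" -- trailing blanks
 * are stripped per entry and the entries are joined with commas.
 */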

#define param_check_vmidfilter(name, p) __param_check(name, p, void)

static struct kernel_param_ops param_ops_vmidfilter = {
	.set = param_set_vmidfilter,
	.get = param_get_vmidfilter,
};

44a01d5b 1249/**
17e19f04 1250 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
44a01d5b
HB
1251 */
1252static int __init hvc_iucv_init(void)
1253{
6c089fd3
HB
1254 int rc;
1255 unsigned int i;
44a01d5b 1256
431429ff
HB
1257 if (!hvc_iucv_devices)
1258 return -ENODEV;
1259
44a01d5b 1260 if (!MACHINE_IS_VM) {
82f3a79b 1261 pr_notice("The z/VM IUCV HVC device driver cannot "
c45ce4b5 1262 "be used without z/VM\n");
431429ff
HB
1263 rc = -ENODEV;
1264 goto out_error;
44a01d5b
HB
1265 }
1266
82f3a79b
HB
1267 if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
1268 pr_err("%lu is not a valid value for the hvc_iucv= "
1269 "kernel parameter\n", hvc_iucv_devices);
431429ff
HB
1270 rc = -EINVAL;
1271 goto out_error;
1272 }
1273
0259162e
HB
1274 /* register IUCV HVC device driver */
1275 rc = driver_register(&hvc_iucv_driver);
1276 if (rc)
1277 goto out_error;
1278
431429ff
HB
1279 /* parse hvc_iucv_allow string and create z/VM user ID filter list */
1280 if (hvc_iucv_filter_string) {
1281 rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
1282 switch (rc) {
1283 case 0:
1284 break;
1285 case -ENOMEM:
1286 pr_err("Allocating memory failed with "
1287 "reason code=%d\n", 3);
1288 goto out_error;
1289 case -EINVAL:
1290 pr_err("hvc_iucv_allow= does not specify a valid "
1291 "z/VM user ID list\n");
1292 goto out_error;
1293 case -ENOSPC:
1294 pr_err("hvc_iucv_allow= specifies too many "
1295 "z/VM user IDs\n");
1296 goto out_error;
1297 default:
1298 goto out_error;
1299 }
82f3a79b 1300 }
44a01d5b
HB
1301
1302 hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
1303 sizeof(struct iucv_tty_buffer),
1304 0, 0, NULL);
1305 if (!hvc_iucv_buffer_cache) {
c45ce4b5 1306 pr_err("Allocating memory failed with reason code=%d\n", 1);
431429ff
HB
1307 rc = -ENOMEM;
1308 goto out_error;
44a01d5b
HB
1309 }
1310
1311 hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
1312 hvc_iucv_buffer_cache);
1313 if (!hvc_iucv_mempool) {
c45ce4b5 1314 pr_err("Allocating memory failed with reason code=%d\n", 2);
44a01d5b 1315 kmem_cache_destroy(hvc_iucv_buffer_cache);
431429ff
HB
1316 rc = -ENOMEM;
1317 goto out_error;
44a01d5b
HB
1318 }
1319
68c6b3d2
HB
1320 /* register the first terminal device as console
1321 * (must be done before allocating hvc terminal devices) */
6c089fd3
HB
1322 rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
1323 if (rc) {
1324 pr_err("Registering HVC terminal device as "
1325 "Linux console failed\n");
1326 goto out_error_memory;
1327 }
68c6b3d2 1328
44a01d5b
HB
1329 /* allocate hvc_iucv_private structs */
1330 for (i = 0; i < hvc_iucv_devices; i++) {
6c089fd3 1331 rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
44a01d5b 1332 if (rc) {
c45ce4b5 1333 pr_err("Creating a new HVC terminal device "
17e19f04 1334 "failed with error code=%d\n", rc);
44a01d5b
HB
1335 goto out_error_hvc;
1336 }
1337 }
1338
1339 /* register IUCV callback handler */
1340 rc = iucv_register(&hvc_iucv_handler, 0);
1341 if (rc) {
c45ce4b5
HB
1342 pr_err("Registering IUCV handlers failed with error code=%d\n",
1343 rc);
c77f7cf7 1344 goto out_error_hvc;
44a01d5b
HB
1345 }
1346
1347 return 0;
1348
44a01d5b
HB
1349out_error_hvc:
1350 for (i = 0; i < hvc_iucv_devices; i++)
0259162e
HB
1351 if (hvc_iucv_table[i])
1352 hvc_iucv_destroy(hvc_iucv_table[i]);
6c089fd3 1353out_error_memory:
44a01d5b
HB
1354 mempool_destroy(hvc_iucv_mempool);
1355 kmem_cache_destroy(hvc_iucv_buffer_cache);
431429ff 1356out_error:
0259162e
HB
1357 if (hvc_iucv_filter)
1358 kfree(hvc_iucv_filter);
431429ff 1359 hvc_iucv_devices = 0; /* ensure that we do not provide any device */
44a01d5b
HB
1360 return rc;
1361}

/**
 * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
 * @val:	Parameter value (numeric)
 */
static int __init hvc_iucv_config(char *val)
{
	return kstrtoul(val, 10, &hvc_iucv_devices);
}


device_initcall(hvc_iucv_init);
__setup("hvc_iucv=", hvc_iucv_config);
core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
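
/*
 * Usage sketch (illustrative; the guest names are made up): booting with
 *
 *	hvc_iucv=4 hvc_iucv_allow=lnxsys01,lnxsys02
 *
 * provides four z/VM IUCV HVC terminals and restricts incoming connection
 * requests to the z/VM user IDs LNXSYS01 and LNXSYS02.
 */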