Merge tag 'master-2014-11-25' of git://git.kernel.org/pub/scm/linux/kernel/git/linvil...
[deliverable/linux.git] / drivers / s390 / kvm / virtio_ccw.c
1 /*
2 * ccw based virtio transport
3 *
4 * Copyright IBM Corp. 2012, 2014
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
11 */
12
13 #include <linux/kernel_stat.h>
14 #include <linux/init.h>
15 #include <linux/bootmem.h>
16 #include <linux/err.h>
17 #include <linux/virtio.h>
18 #include <linux/virtio_config.h>
19 #include <linux/slab.h>
20 #include <linux/interrupt.h>
21 #include <linux/virtio_ring.h>
22 #include <linux/pfn.h>
23 #include <linux/async.h>
24 #include <linux/wait.h>
25 #include <linux/list.h>
26 #include <linux/bitops.h>
27 #include <linux/module.h>
28 #include <linux/io.h>
29 #include <linux/kvm_para.h>
30 #include <linux/notifier.h>
31 #include <asm/setup.h>
32 #include <asm/irq.h>
33 #include <asm/cio.h>
34 #include <asm/ccwdev.h>
35 #include <asm/virtio-ccw.h>
36 #include <asm/isc.h>
37 #include <asm/airq.h>
38
39 /*
40 * virtio related functions
41 */
42
/*
 * Layout of the READ_VQ_CONF channel command payload: the driver fills in
 * the queue index, the host answers with the queue size in @num.
 */
struct vq_config_block {
	__u16 index;	/* virtqueue index to query */
	__u16 num;	/* returned number of ring entries */
} __packed;

#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */
50
/* Per-device state of the ccw-based virtio transport. */
struct virtio_ccw_device {
	struct virtio_device vdev;	/* embedded generic virtio device */
	__u8 *status;			/* device status byte, DMA-able buffer */
	__u8 config[VIRTIO_CCW_CONFIG_SIZE]; /* cached copy of the config space */
	struct ccw_device *cdev;	/* underlying ccw device */
	__u32 curr_io;			/* VIRTIO_CCW_DOING_* bits of I/O in flight */
	int err;			/* sticky error from the interrupt handler */
	wait_queue_head_t wait_q;	/* woken when an I/O completes */
	spinlock_t lock;		/* protects the virtqueues list */
	struct list_head virtqueues;	/* list of virtio_ccw_vq_info */
	unsigned long indicators;	/* classic per-queue interrupt indicators */
	unsigned long indicators2;	/* config-change indicator (bit 0) */
	struct vq_config_block *config_block; /* DMA buffer for READ_VQ_CONF */
	bool is_thinint;		/* using adapter (thin) interrupts? */
	bool going_away;		/* teardown in progress, see virtio_grab_drvdata() */
	bool device_lost;		/* set on CIO_GONE notification */
	void *airq_info;		/* struct airq_info used for thinint */
};
69
/* Payload of the SET_VQ channel command registering a queue with the host. */
struct vq_info_block {
	__u64 queue;	/* guest address of the vring */
	__u32 align;	/* ring alignment */
	__u16 index;	/* virtqueue index */
	__u16 num;	/* number of ring entries (0 to unregister) */
} __packed;

/* Payload of the READ/WRITE_FEAT commands; one 32-bit feature word at a time. */
struct virtio_feature_desc {
	__u32 features;	/* little-endian feature bits */
	__u8 index;	/* which 32-bit feature word */
} __packed;

/* Payload of SET_IND_ADAPTER: where the host finds the thinint indicators. */
struct virtio_thinint_area {
	unsigned long summary_indicator; /* address of the summary byte */
	unsigned long indicator;	 /* address of the indicator bit vector */
	u64 bit_nr;			 /* first bit used by this device */
	u8 isc;				 /* interruption subclass */
} __packed;

/* Driver-private bookkeeping for one virtqueue, linked on vcdev->virtqueues. */
struct virtio_ccw_vq_info {
	struct virtqueue *vq;
	int num;			/* ring size as reported by the host */
	void *queue;			/* vring memory (alloc_pages_exact) */
	struct vq_info_block *info_block; /* DMA buffer for SET_VQ */
	int bit_nr;			/* thinint indicator bit, if used */
	struct list_head node;
	long cookie;			/* hypercall cookie returned by the host */
};
98
#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */

/* One indicator area spans a cache line; up to MAX_AIRQ_AREAS are created. */
#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
#define MAX_AIRQ_AREAS 20

/* Module-wide toggle; cleared if the host rejects adapter interrupts. */
static int virtio_ccw_use_airq = 1;

/* One shared adapter-interrupt indicator area, used by several devices. */
struct airq_info {
	rwlock_t lock;			/* protects the indicator vector */
	u8 summary_indicator;		/* summary byte written by the host */
	struct airq_struct airq;	/* registration with the airq layer */
	struct airq_iv *aiv;		/* indicator bit vector + vq pointers */
};
static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
113
/* Channel command codes understood by the virtio-ccw host device. */
#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
#define CCW_CMD_SET_CONF_IND 0x53
#define CCW_CMD_READ_FEAT 0x12
#define CCW_CMD_WRITE_FEAT 0x11
#define CCW_CMD_READ_CONF 0x22
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
#define CCW_CMD_SET_IND_ADAPTER 0x73

/*
 * Intparm flags identifying which command is in flight; kept in
 * vcdev->curr_io and matched by the interrupt handler. The low 16 bits
 * of the intparm may carry a queue index (see VIRTIO_CCW_DOING_SET_VQ).
 */
#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
138
139 static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
140 {
141 return container_of(vdev, struct virtio_ccw_device, vdev);
142 }
143
/*
 * Remove @vq's adapter-interrupt indicator bit from @info, if it has one.
 * Scans the vector for the slot whose stored pointer matches @vq, then
 * frees the bit and clears the pointer under the write lock so the
 * interrupt handler cannot race with the teardown.
 */
static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
{
	unsigned long i, flags;

	write_lock_irqsave(&info->lock, flags);
	for (i = 0; i < airq_iv_end(info->aiv); i++) {
		if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
			airq_iv_free_bit(info->aiv, i);
			airq_iv_set_ptr(info->aiv, i, 0);
			break;
		}
	}
	write_unlock_irqrestore(&info->lock, flags);
}
158
/*
 * Adapter-interrupt handler: dispatch vring interrupts for every set
 * indicator bit. The vector is scanned twice: once while the summary
 * indicator is still set, and once after clearing it, to close the window
 * where the host sets a bit after the first scan but before the summary
 * is reset (it would then not raise a new adapter interrupt).
 */
static void virtio_airq_handler(struct airq_struct *airq)
{
	struct airq_info *info = container_of(airq, struct airq_info, airq);
	unsigned long ai;

	inc_irq_stat(IRQIO_VAI);
	read_lock(&info->lock);
	/* Walk through indicators field, summary indicator active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	info->summary_indicator = 0;
	smp_wmb();	/* summary clear must be visible before the re-scan */
	/* Walk through indicators field, summary indicator not active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	read_unlock(&info->lock);
}
184
/*
 * Allocate one adapter-interrupt indicator area and register it with the
 * airq layer. Returns NULL on any failure, undoing partial setup.
 */
static struct airq_info *new_airq_info(void)
{
	struct airq_info *info;
	int rc;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;
	rwlock_init(&info->lock);
	/* Bit vector with an attached pointer per bit (points at the vq). */
	info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR);
	if (!info->aiv) {
		kfree(info);
		return NULL;
	}
	info->airq.handler = virtio_airq_handler;
	info->airq.lsi_ptr = &info->summary_indicator;
	info->airq.lsi_mask = 0xff;
	info->airq.isc = VIRTIO_AIRQ_ISC;
	rc = register_adapter_interrupt(&info->airq);
	if (rc) {
		airq_iv_release(info->aiv);
		kfree(info);
		return NULL;
	}
	return info;
}
211
212 static void destroy_airq_info(struct airq_info *info)
213 {
214 if (!info)
215 return;
216
217 unregister_adapter_interrupt(&info->airq);
218 airq_iv_release(info->aiv);
219 kfree(info);
220 }
221
/*
 * Find (or create) an indicator area with @nvqs free consecutive bits and
 * claim them, storing the vq pointers alongside the bits.
 *
 * On success returns the guest address of the indicator vector and sets
 * *first to the first allocated bit and *airq_info to the chosen area;
 * returns 0 if no area could be created or none has room.
 */
static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
					u64 *first, void **airq_info)
{
	int i, j;
	struct airq_info *info;
	unsigned long indicator_addr = 0;
	unsigned long bit, flags;

	for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
		/* Areas are created lazily on first use. */
		if (!airq_areas[i])
			airq_areas[i] = new_airq_info();
		info = airq_areas[i];
		if (!info)
			return 0;
		write_lock_irqsave(&info->lock, flags);
		bit = airq_iv_alloc(info->aiv, nvqs);
		if (bit == -1UL) {
			/* Not enough vacancies. */
			write_unlock_irqrestore(&info->lock, flags);
			continue;
		}
		*first = bit;
		*airq_info = info;
		indicator_addr = (unsigned long)info->aiv->vector;
		for (j = 0; j < nvqs; j++) {
			airq_iv_set_ptr(info->aiv, bit + j,
					(unsigned long)vqs[j]);
		}
		write_unlock_irqrestore(&info->lock, flags);
	}
	return indicator_addr;
}
254
255 static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
256 {
257 struct virtio_ccw_vq_info *info;
258
259 list_for_each_entry(info, &vcdev->virtqueues, node)
260 drop_airq_indicator(info->vq, vcdev->airq_info);
261 }
262
263 static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
264 {
265 unsigned long flags;
266 __u32 ret;
267
268 spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
269 if (vcdev->err)
270 ret = 0;
271 else
272 ret = vcdev->curr_io & flag;
273 spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
274 return ret;
275 }
276
/*
 * Start @ccw on the device and wait synchronously for its completion.
 * -EBUSY from ccw_device_start() is retried; completion (or failure) is
 * signalled by the interrupt handler clearing the flag bit in curr_io.
 * Returns the start error, or vcdev->err as recorded by the handler.
 */
static int ccw_io_helper(struct virtio_ccw_device *vcdev,
			 struct ccw1 *ccw, __u32 intparm)
{
	int ret;
	unsigned long flags;
	int flag = intparm & VIRTIO_CCW_INTPARM_MASK;

	do {
		spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
		ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
		if (!ret) {
			/* First I/O of a batch resets the sticky error. */
			if (!vcdev->curr_io)
				vcdev->err = 0;
			vcdev->curr_io |= flag;
		}
		spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
		cpu_relax();
	} while (ret == -EBUSY);
	wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
	return ret ? ret : vcdev->err;
}
298
299 static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
300 struct ccw1 *ccw)
301 {
302 int ret;
303 unsigned long *indicatorp = NULL;
304 struct virtio_thinint_area *thinint_area = NULL;
305 struct airq_info *airq_info = vcdev->airq_info;
306
307 if (vcdev->is_thinint) {
308 thinint_area = kzalloc(sizeof(*thinint_area),
309 GFP_DMA | GFP_KERNEL);
310 if (!thinint_area)
311 return;
312 thinint_area->summary_indicator =
313 (unsigned long) &airq_info->summary_indicator;
314 thinint_area->isc = VIRTIO_AIRQ_ISC;
315 ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
316 ccw->count = sizeof(*thinint_area);
317 ccw->cda = (__u32)(unsigned long) thinint_area;
318 } else {
319 indicatorp = kmalloc(sizeof(&vcdev->indicators),
320 GFP_DMA | GFP_KERNEL);
321 if (!indicatorp)
322 return;
323 *indicatorp = 0;
324 ccw->cmd_code = CCW_CMD_SET_IND;
325 ccw->count = sizeof(vcdev->indicators);
326 ccw->cda = (__u32)(unsigned long) indicatorp;
327 }
328 /* Deregister indicators from host. */
329 vcdev->indicators = 0;
330 ccw->flags = 0;
331 ret = ccw_io_helper(vcdev, ccw,
332 vcdev->is_thinint ?
333 VIRTIO_CCW_DOING_SET_IND_ADAPTER :
334 VIRTIO_CCW_DOING_SET_IND);
335 if (ret && (ret != -ENODEV))
336 dev_info(&vcdev->cdev->dev,
337 "Failed to deregister indicators (%d)\n", ret);
338 else if (vcdev->is_thinint)
339 virtio_ccw_drop_indicators(vcdev);
340 kfree(indicatorp);
341 kfree(thinint_area);
342 }
343
/*
 * Issue the KVM virtio-ccw notify hypercall (diagnose 0x500) to kick the
 * host for @queue_index of the device at @schid. The cookie returned by
 * the previous call is passed back to the host.
 *
 * Register pinning: r1 = function code, r2 = schid (input) and return
 * value (output) — the same register intentionally serves both roles —
 * r3 = queue index, r4 = cookie.
 */
static inline long do_kvm_notify(struct subchannel_id schid,
				 unsigned long queue_index,
				 long cookie)
{
	register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
	register struct subchannel_id __schid asm("2") = schid;
	register unsigned long __index asm("3") = queue_index;
	register long __rc asm("2");
	register long __cookie asm("4") = cookie;

	asm volatile ("diag 2,4,0x500\n"
		      : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index),
		      "d"(__cookie)
		      : "memory", "cc");
	return __rc;
}
360
361 static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
362 {
363 struct virtio_ccw_vq_info *info = vq->priv;
364 struct virtio_ccw_device *vcdev;
365 struct subchannel_id schid;
366
367 vcdev = to_vc_device(info->vq->vdev);
368 ccw_device_get_schid(vcdev->cdev, &schid);
369 info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
370 if (info->cookie < 0)
371 return false;
372 return true;
373 }
374
/*
 * Ask the host for the ring size of virtqueue @index via READ_VQ_CONF.
 * Returns the number of ring entries reported in the config block.
 * NOTE(review): the ccw_io_helper() return value is not checked here;
 * on failure the stale config_block->num would be returned — callers
 * treat an unusable size as an allocation problem further down.
 */
static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
				   struct ccw1 *ccw, int index)
{
	vcdev->config_block->index = index;
	ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
	ccw->flags = 0;
	ccw->count = sizeof(struct vq_config_block);
	ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
	return vcdev->config_block->num;
}
386
/*
 * Tear down one virtqueue: unlink it from the device list, tell the host
 * to drop it (SET_VQ with num = 0), then free the vring and bookkeeping.
 */
static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
	struct virtio_ccw_vq_info *info = vq->priv;
	unsigned long flags;
	unsigned long size;
	int ret;
	unsigned int index = vq->index;

	/* Remove from our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	/* Release from host. */
	info->info_block->queue = 0;
	info->info_block->align = 0;
	info->info_block->index = index;
	info->info_block->num = 0;
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->count = sizeof(*info->info_block);
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	ret = ccw_io_helper(vcdev, ccw,
			    VIRTIO_CCW_DOING_SET_VQ | index);
	/*
	 * -ENODEV isn't considered an error: The device is gone anyway.
	 * This may happen on device detach.
	 */
	if (ret && (ret != -ENODEV))
		dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
			 ret, index);

	vring_del_virtqueue(vq);
	size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
	free_pages_exact(info->queue, size);
	kfree(info->info_block);
	kfree(info);
}
426
/*
 * virtio_config_ops.del_vqs: deregister interrupt indicators, then delete
 * every virtqueue of the device. A single DMA-able ccw is shared across
 * all the channel commands issued here.
 */
static void virtio_ccw_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct ccw1 *ccw;
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	virtio_ccw_drop_indicator(vcdev, ccw);

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		virtio_ccw_del_vq(vq, ccw);

	kfree(ccw);
}
444
/*
 * Create virtqueue @i: query its ring size from the host, allocate the
 * vring, create the virtqueue, and register it with the host via SET_VQ.
 * On success the queue is linked onto vcdev->virtqueues; on failure all
 * partially-acquired resources are released and an ERR_PTR is returned.
 */
static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
					     int i, vq_callback_t *callback,
					     const char *name,
					     struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int err;
	struct virtqueue *vq = NULL;
	struct virtio_ccw_vq_info *info;
	unsigned long size = 0; /* silence the compiler */
	unsigned long flags;

	/* Allocate queue. */
	info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
	if (!info) {
		dev_warn(&vcdev->cdev->dev, "no info\n");
		err = -ENOMEM;
		goto out_err;
	}
	/* The info block is passed to the host and must be below 2G. */
	info->info_block = kzalloc(sizeof(*info->info_block),
				   GFP_DMA | GFP_KERNEL);
	if (!info->info_block) {
		dev_warn(&vcdev->cdev->dev, "no info block\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
	size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
	info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (info->queue == NULL) {
		dev_warn(&vcdev->cdev->dev, "no queue\n");
		err = -ENOMEM;
		goto out_err;
	}

	vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
				 true, info->queue, virtio_ccw_kvm_notify,
				 callback, name);
	if (!vq) {
		/* For now, we fail if we can't get the requested size. */
		dev_warn(&vcdev->cdev->dev, "no vq\n");
		err = -ENOMEM;
		goto out_err;
	}

	/* Register it with the host. */
	info->info_block->queue = (__u64)info->queue;
	info->info_block->align = KVM_VIRTIO_CCW_RING_ALIGN;
	info->info_block->index = i;
	info->info_block->num = info->num;
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->count = sizeof(*info->info_block);
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
	if (err) {
		dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
		goto out_err;
	}

	info->vq = vq;
	vq->priv = info;

	/* Save it to our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_add(&info->node, &vcdev->virtqueues);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	return vq;

out_err:
	/* Unwind whatever was acquired before the failure point. */
	if (vq)
		vring_del_virtqueue(vq);
	if (info) {
		if (info->queue)
			free_pages_exact(info->queue, size);
		kfree(info->info_block);
	}
	kfree(info);
	return ERR_PTR(err);
}
526
/*
 * Try to register adapter (thin) interrupts for all queues of the device:
 * claim indicator bits, then tell the host about them via SET_IND_ADAPTER.
 * -EOPNOTSUPP from the host disables adapter interrupts module-wide so we
 * don't keep retrying for every device. On any failure the claimed
 * indicator bits are released again.
 */
static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
					   struct virtqueue *vqs[], int nvqs,
					   struct ccw1 *ccw)
{
	int ret;
	struct virtio_thinint_area *thinint_area = NULL;
	struct airq_info *info;

	thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL);
	if (!thinint_area) {
		ret = -ENOMEM;
		goto out;
	}
	/* Try to get an indicator. */
	thinint_area->indicator = get_airq_indicator(vqs, nvqs,
						     &thinint_area->bit_nr,
						     &vcdev->airq_info);
	if (!thinint_area->indicator) {
		ret = -ENOSPC;
		goto out;
	}
	info = vcdev->airq_info;
	thinint_area->summary_indicator =
		(unsigned long) &info->summary_indicator;
	thinint_area->isc = VIRTIO_AIRQ_ISC;
	ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(*thinint_area);
	ccw->cda = (__u32)(unsigned long)thinint_area;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			/*
			 * The host does not support adapter interrupts
			 * for virtio-ccw, stop trying.
			 */
			virtio_ccw_use_airq = 0;
			pr_info("Adapter interrupts unsupported on host\n");
		} else
			dev_warn(&vcdev->cdev->dev,
				 "enabling adapter interrupts = %d\n", ret);
		virtio_ccw_drop_indicators(vcdev);
	}
out:
	kfree(thinint_area);
	return ret;
}
574
575 static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
576 struct virtqueue *vqs[],
577 vq_callback_t *callbacks[],
578 const char *names[])
579 {
580 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
581 unsigned long *indicatorp = NULL;
582 int ret, i;
583 struct ccw1 *ccw;
584
585 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
586 if (!ccw)
587 return -ENOMEM;
588
589 for (i = 0; i < nvqs; ++i) {
590 vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
591 ccw);
592 if (IS_ERR(vqs[i])) {
593 ret = PTR_ERR(vqs[i]);
594 vqs[i] = NULL;
595 goto out;
596 }
597 }
598 ret = -ENOMEM;
599 /* We need a data area under 2G to communicate. */
600 indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
601 if (!indicatorp)
602 goto out;
603 *indicatorp = (unsigned long) &vcdev->indicators;
604 if (vcdev->is_thinint) {
605 ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
606 if (ret)
607 /* no error, just fall back to legacy interrupts */
608 vcdev->is_thinint = 0;
609 }
610 if (!vcdev->is_thinint) {
611 /* Register queue indicators with host. */
612 vcdev->indicators = 0;
613 ccw->cmd_code = CCW_CMD_SET_IND;
614 ccw->flags = 0;
615 ccw->count = sizeof(vcdev->indicators);
616 ccw->cda = (__u32)(unsigned long) indicatorp;
617 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
618 if (ret)
619 goto out;
620 }
621 /* Register indicators2 with host for config changes */
622 *indicatorp = (unsigned long) &vcdev->indicators2;
623 vcdev->indicators2 = 0;
624 ccw->cmd_code = CCW_CMD_SET_CONF_IND;
625 ccw->flags = 0;
626 ccw->count = sizeof(vcdev->indicators2);
627 ccw->cda = (__u32)(unsigned long) indicatorp;
628 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
629 if (ret)
630 goto out;
631
632 kfree(indicatorp);
633 kfree(ccw);
634 return 0;
635 out:
636 kfree(indicatorp);
637 kfree(ccw);
638 virtio_ccw_del_vqs(vdev);
639 return ret;
640 }
641
/*
 * virtio_config_ops.reset: clear the local status byte and send a
 * VDEV_RESET channel command to the host.
 */
static void virtio_ccw_reset(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	/* Zero status bits. */
	*vcdev->status = 0;

	/* Send a reset ccw on device. */
	ccw->cmd_code = CCW_CMD_VDEV_RESET;
	ccw->flags = 0;
	ccw->count = 0;
	ccw->cda = 0;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
	kfree(ccw);
}
662
/*
 * virtio_config_ops.get_features: read feature word 0 from the host via
 * READ_FEAT. Returns 0 on any allocation or I/O failure (no features).
 */
static u32 virtio_ccw_get_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	int ret, rc;
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return 0;

	features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
	if (!features) {
		rc = 0;
		goto out_free;
	}
	/* Read the feature bits from the host. */
	/* TODO: Features > 32 bits */
	features->index = 0;
	ccw->cmd_code = CCW_CMD_READ_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
	if (ret) {
		rc = 0;
		goto out_free;
	}

	/* Feature bits are transmitted little endian. */
	rc = le32_to_cpu(features->features);

out_free:
	kfree(features);
	kfree(ccw);
	return rc;
}
699
/*
 * virtio_config_ops.finalize_features: let the ring layer filter transport
 * features, then write the driver's accepted feature words to the host,
 * one 32-bit chunk per WRITE_FEAT command.
 */
static void virtio_ccw_finalize_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	int i;
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
	if (!features)
		goto out_free;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* One iteration per 32-bit half of each feature element. */
	for (i = 0; i < sizeof(*vdev->features) / sizeof(features->features);
	     i++) {
		int highbits = i % 2 ? 32 : 0;
		features->index = i;
		features->features = cpu_to_le32(vdev->features[i / 2]
						 >> highbits);
		/* Write the feature bits to the host. */
		ccw->cmd_code = CCW_CMD_WRITE_FEAT;
		ccw->flags = 0;
		ccw->count = sizeof(*features);
		ccw->cda = (__u32)(unsigned long)features;
		ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
	}
out_free:
	kfree(features);
	kfree(ccw);
}
735
/*
 * virtio_config_ops.get: read the device config space from the host into
 * a DMA-able bounce buffer, refresh the cached copy, and hand the caller
 * the requested slice. Silently returns on allocation or I/O failure
 * (the virtio config API has no error channel).
 */
static void virtio_ccw_get_config(struct virtio_device *vdev,
				  unsigned int offset, void *buf, unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int ret;
	struct ccw1 *ccw;
	void *config_area;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
	if (!config_area)
		goto out_free;

	/* Read the config area from the host. */
	ccw->cmd_code = CCW_CMD_READ_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ccw->cda = (__u32)(unsigned long)config_area;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
	if (ret)
		goto out_free;

	memcpy(vcdev->config, config_area, sizeof(vcdev->config));
	memcpy(buf, &vcdev->config[offset], len);

out_free:
	kfree(config_area);
	kfree(ccw);
}
768
/*
 * virtio_config_ops.set: update the cached config space, then write the
 * whole area back to the host through a DMA-able bounce buffer via
 * WRITE_CONF. Silently returns on allocation failure.
 */
static void virtio_ccw_set_config(struct virtio_device *vdev,
				  unsigned int offset, const void *buf,
				  unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;
	void *config_area;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
	if (!config_area)
		goto out_free;

	memcpy(&vcdev->config[offset], buf, len);
	/* Write the config area to the host. */
	memcpy(config_area, vcdev->config, sizeof(vcdev->config));
	ccw->cmd_code = CCW_CMD_WRITE_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ccw->cda = (__u32)(unsigned long)config_area;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);

out_free:
	kfree(config_area);
	kfree(ccw);
}
798
799 static u8 virtio_ccw_get_status(struct virtio_device *vdev)
800 {
801 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
802
803 return *vcdev->status;
804 }
805
/*
 * virtio_config_ops.set_status: record the new status byte in the
 * DMA-able status buffer and push it to the host via WRITE_STATUS.
 */
static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	/* Write the status to the host. */
	*vcdev->status = status;
	ccw->cmd_code = CCW_CMD_WRITE_STATUS;
	ccw->flags = 0;
	ccw->count = sizeof(status);
	ccw->cda = (__u32)(unsigned long)vcdev->status;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
	kfree(ccw);
}
824
/* virtio transport operations implemented by this ccw-based transport. */
static struct virtio_config_ops virtio_ccw_config_ops = {
	.get_features = virtio_ccw_get_features,
	.finalize_features = virtio_ccw_finalize_features,
	.get = virtio_ccw_get_config,
	.set = virtio_ccw_set_config,
	.get_status = virtio_ccw_get_status,
	.set_status = virtio_ccw_set_status,
	.reset = virtio_ccw_reset,
	.find_vqs = virtio_ccw_find_vqs,
	.del_vqs = virtio_ccw_del_vqs,
};
836
837
838 /*
839 * ccw bus driver related functions
840 */
841
/*
 * Device release callback: frees the transport state once the embedded
 * virtio device's last reference is dropped.
 */
static void virtio_ccw_release_dev(struct device *_d)
{
	struct virtio_device *dev = container_of(_d, struct virtio_device,
						 dev);
	struct virtio_ccw_device *vcdev = to_vc_device(dev);

	kfree(vcdev->status);
	kfree(vcdev->config_block);
	kfree(vcdev);
}
852
853 static int irb_is_error(struct irb *irb)
854 {
855 if (scsw_cstat(&irb->scsw) != 0)
856 return 1;
857 if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
858 return 1;
859 if (scsw_cc(&irb->scsw) != 0)
860 return 1;
861 return 0;
862 }
863
864 static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
865 int index)
866 {
867 struct virtio_ccw_vq_info *info;
868 unsigned long flags;
869 struct virtqueue *vq;
870
871 vq = NULL;
872 spin_lock_irqsave(&vcdev->lock, flags);
873 list_for_each_entry(info, &vcdev->virtqueues, node) {
874 if (info->vq->index == index) {
875 vq = info->vq;
876 break;
877 }
878 }
879 spin_unlock_irqrestore(&vcdev->lock, flags);
880 return vq;
881 }
882
/*
 * ccw interrupt handler: completes in-flight channel commands (waking
 * ccw_io_helper() waiters) and dispatches classic queue and config-change
 * indicators.
 */
static void virtio_ccw_int_handler(struct ccw_device *cdev,
				   unsigned long intparm,
				   struct irb *irb)
{
	__u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
	int i;
	struct virtqueue *vq;

	if (!vcdev)
		return;
	/* Check if it's a notification from the host. */
	if ((intparm == 0) &&
	    (scsw_stctl(&irb->scsw) ==
	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
		/* OK */
	}
	if (irb_is_error(irb)) {
		/* Command reject? */
		if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
		    (irb->ecw[0] & SNS0_CMD_REJECT))
			vcdev->err = -EOPNOTSUPP;
		else
			/* Map everything else to -EIO. */
			vcdev->err = -EIO;
	}
	/* Complete the channel command identified by the intparm flag. */
	if (vcdev->curr_io & activity) {
		switch (activity) {
		case VIRTIO_CCW_DOING_READ_FEAT:
		case VIRTIO_CCW_DOING_WRITE_FEAT:
		case VIRTIO_CCW_DOING_READ_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_STATUS:
		case VIRTIO_CCW_DOING_SET_VQ:
		case VIRTIO_CCW_DOING_SET_IND:
		case VIRTIO_CCW_DOING_SET_CONF_IND:
		case VIRTIO_CCW_DOING_RESET:
		case VIRTIO_CCW_DOING_READ_VQ_CONF:
		case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
			vcdev->curr_io &= ~activity;
			wake_up(&vcdev->wait_q);
			break;
		default:
			/* don't know what to do... */
			dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
				 activity);
			WARN_ON(1);
			break;
		}
	}
	for_each_set_bit(i, &vcdev->indicators,
			 sizeof(vcdev->indicators) * BITS_PER_BYTE) {
		/* The bit clear must happen before the vring kick. */
		clear_bit(i, &vcdev->indicators);
		barrier();
		vq = virtio_ccw_vq_by_ind(vcdev, i);
		vring_interrupt(0, vq);
	}
	/* Bit 0 of indicators2 signals a config space change. */
	if (test_bit(0, &vcdev->indicators2)) {
		virtio_config_changed(&vcdev->vdev);
		clear_bit(0, &vcdev->indicators2);
	}
}
946
947 /*
948 * We usually want to autoonline all devices, but give the admin
949 * a way to exempt devices from this.
950 */
/* One bit per possible device number, per subchannel set. */
#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
		     (8*sizeof(long)))
static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];

/* Comma-separated bus id (ranges) exempt from auto-onlining. */
static char *no_auto = "";

module_param(no_auto, charp, 0444);
MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");
959
960 static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
961 {
962 struct ccw_dev_id id;
963
964 ccw_device_get_id(cdev, &id);
965 if (test_bit(id.devno, devs_no_auto[id.ssid]))
966 return 0;
967 return 1;
968 }
969
/* Async worker: set the ccw device online outside the probe path. */
static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
}
979
/*
 * ccw driver probe: install the interrupt handler and, unless the device
 * is exempted via no_auto=, schedule it to be set online asynchronously.
 */
static int virtio_ccw_probe(struct ccw_device *cdev)
{
	cdev->handler = virtio_ccw_int_handler;

	if (virtio_ccw_check_autoonline(cdev))
		async_schedule(virtio_ccw_auto_online, cdev);
	return 0;
}
988
/*
 * Atomically claim the device for teardown: returns the drvdata and marks
 * it going_away under the ccwdev lock, or NULL if there is no drvdata or
 * another teardown path already claimed it. Ensures remove/offline don't
 * both unregister the virtio device.
 */
static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	vcdev = dev_get_drvdata(&cdev->dev);
	if (!vcdev || vcdev->going_away) {
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		return NULL;
	}
	vcdev->going_away = true;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return vcdev;
}
1004
/*
 * ccw driver remove: if the device is still online, unregister the virtio
 * device (breaking it first if the hardware vanished) and clear drvdata.
 */
static void virtio_ccw_remove(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

	if (vcdev && cdev->online) {
		if (vcdev->device_lost)
			virtio_break_device(&vcdev->vdev);
		unregister_virtio_device(&vcdev->vdev);
		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
		dev_set_drvdata(&cdev->dev, NULL);
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	}
	cdev->handler = NULL;
}
1020
/*
 * ccw driver set_offline: unregister the virtio device (breaking it first
 * if the hardware vanished) and detach the drvdata. A NULL from
 * virtio_grab_drvdata() means another teardown path already won.
 */
static int virtio_ccw_offline(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

	if (!vcdev)
		return 0;
	if (vcdev->device_lost)
		virtio_break_device(&vcdev->vdev);
	unregister_virtio_device(&vcdev->vdev);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return 0;
}
1036
1037
/*
 * ccw driver set_online: allocate the transport state (config block and
 * status byte in DMA-able memory), wire up the embedded virtio device,
 * publish it as drvdata, and register it with virtio core. The virtio
 * device id is derived from the control unit type/model.
 */
static int virtio_ccw_online(struct ccw_device *cdev)
{
	int ret;
	struct virtio_ccw_device *vcdev;
	unsigned long flags;

	vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
	if (!vcdev) {
		dev_warn(&cdev->dev, "Could not get memory for virtio\n");
		ret = -ENOMEM;
		goto out_free;
	}
	vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
				      GFP_DMA | GFP_KERNEL);
	if (!vcdev->config_block) {
		ret = -ENOMEM;
		goto out_free;
	}
	vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
	if (!vcdev->status) {
		ret = -ENOMEM;
		goto out_free;
	}

	vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */

	vcdev->vdev.dev.parent = &cdev->dev;
	vcdev->vdev.dev.release = virtio_ccw_release_dev;
	vcdev->vdev.config = &virtio_ccw_config_ops;
	vcdev->cdev = cdev;
	init_waitqueue_head(&vcdev->wait_q);
	INIT_LIST_HEAD(&vcdev->virtqueues);
	spin_lock_init(&vcdev->lock);

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, vcdev);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	vcdev->vdev.id.vendor = cdev->id.cu_type;
	vcdev->vdev.id.device = cdev->id.cu_model;
	ret = register_virtio_device(&vcdev->vdev);
	if (ret) {
		dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
			 ret);
		goto out_put;
	}
	return 0;
out_put:
	/* Registration failed: release_dev frees vcdev via put_device. */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	put_device(&vcdev->vdev.dev);
	return ret;
out_free:
	/* Not yet registered: free the pieces by hand. */
	if (vcdev) {
		kfree(vcdev->status);
		kfree(vcdev->config_block);
	}
	kfree(vcdev);
	return ret;
}
1098
1099 static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
1100 {
1101 int rc;
1102 struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
1103
1104 /*
1105 * Make sure vcdev is set
1106 * i.e. set_offline/remove callback not already running
1107 */
1108 if (!vcdev)
1109 return NOTIFY_DONE;
1110
1111 switch (event) {
1112 case CIO_GONE:
1113 vcdev->device_lost = true;
1114 rc = NOTIFY_DONE;
1115 break;
1116 default:
1117 rc = NOTIFY_DONE;
1118 break;
1119 }
1120 return rc;
1121 }
1122
/* Control unit type 0x3832 identifies a virtio-ccw proxy device. */
static struct ccw_device_id virtio_ids[] = {
	{ CCW_DEVICE(0x3832, 0) },
	{},
};
MODULE_DEVICE_TABLE(ccw, virtio_ids);
1128
/* ccw bus driver glue for the virtio-ccw transport. */
static struct ccw_driver virtio_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "virtio_ccw",
	},
	.ids = virtio_ids,
	.probe = virtio_ccw_probe,
	.remove = virtio_ccw_remove,
	.set_offline = virtio_ccw_offline,
	.set_online = virtio_ccw_online,
	.notify = virtio_ccw_cio_notify,
	.int_class = IRQIO_VIR,
};
1142
/*
 * Parse a run of hex digits at *cp into *val, advancing *cp past the
 * digits consumed. Returns 0 on success, 1 if fewer than @min_digit or
 * more than @max_digit digits were found, or the value exceeds @max_val.
 * Note: up to max_digit + 1 digits may be consumed before the
 * too-many-digits case is detected; *cp is not rewound on failure,
 * which is fine since callers abandon the string on error.
 */
static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
			   int max_digit, int max_val)
{
	int diff;

	diff = 0;
	*val = 0;

	while (diff <= max_digit) {
		int value = hex_to_bin(**cp);

		if (value < 0)
			break;
		*val = *val * 16 + value;
		(*cp)++;
		diff++;
	}

	if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
		return 1;

	return 0;
}
1166
1167 static int __init parse_busid(char *str, unsigned int *cssid,
1168 unsigned int *ssid, unsigned int *devno)
1169 {
1170 char *str_work;
1171 int rc, ret;
1172
1173 rc = 1;
1174
1175 if (*str == '\0')
1176 goto out;
1177
1178 str_work = str;
1179 ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
1180 if (ret || (str_work[0] != '.'))
1181 goto out;
1182 str_work++;
1183 ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
1184 if (ret || (str_work[0] != '.'))
1185 goto out;
1186 str_work++;
1187 ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
1188 if (ret || (str_work[0] != '\0'))
1189 goto out;
1190
1191 rc = 0;
1192 out:
1193 return rc;
1194 }
1195
/*
 * Parse the no_auto= module parameter: a comma-separated list of bus ids
 * or "from-to" bus id ranges. Every device number in a valid range gets
 * its bit set in devs_no_auto; malformed entries are skipped. Note that
 * cssid values are parsed but ranges only span ssid/devno.
 */
static void __init no_auto_parse(void)
{
	unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
	char *parm, *str;
	int rc;

	str = no_auto;
	while ((parm = strsep(&str, ","))) {
		rc = parse_busid(strsep(&parm, "-"), &from_cssid,
				 &from_ssid, &from);
		if (rc)
			continue;
		if (parm != NULL) {
			/* Entry is a range: parse the upper bound. */
			rc = parse_busid(parm, &to_cssid,
					 &to_ssid, &to);
			if ((from_ssid > to_ssid) ||
			    ((from_ssid == to_ssid) && (from > to)))
				rc = -EINVAL;
		} else {
			/* Single bus id: range of one. */
			to_cssid = from_cssid;
			to_ssid = from_ssid;
			to = from;
		}
		if (rc)
			continue;
		/* Mark every device number in [from, to], wrapping ssids. */
		while ((from_ssid < to_ssid) ||
		       ((from_ssid == to_ssid) && (from <= to))) {
			set_bit(from, devs_no_auto[from_ssid]);
			from++;
			if (from > __MAX_SUBCHANNEL) {
				from_ssid++;
				from = 0;
			}
		}
	}
}
1232
/* Module init: parse the no_auto exemptions, then register the driver. */
static int __init virtio_ccw_init(void)
{
	/* parse no_auto string before we do anything further */
	no_auto_parse();
	return ccw_driver_register(&virtio_ccw_driver);
}
module_init(virtio_ccw_init);
1240
/* Module exit: unregister the driver and free all airq indicator areas. */
static void __exit virtio_ccw_exit(void)
{
	int i;

	ccw_driver_unregister(&virtio_ccw_driver);
	for (i = 0; i < MAX_AIRQ_AREAS; i++)
		destroy_airq_info(airq_areas[i]);
}
module_exit(virtio_ccw_exit);
This page took 0.11492 seconds and 5 git commands to generate.