virtio_ccw: legacy: don't negotiate rev 1/features
drivers/s390/kvm/virtio_ccw.c
1/*
2 * ccw based virtio transport
3 *
4 * Copyright IBM Corp. 2012, 2014
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
11 */
12
13#include <linux/kernel_stat.h>
14#include <linux/init.h>
15#include <linux/bootmem.h>
16#include <linux/err.h>
17#include <linux/virtio.h>
18#include <linux/virtio_config.h>
19#include <linux/slab.h>
20#include <linux/interrupt.h>
21#include <linux/virtio_ring.h>
22#include <linux/pfn.h>
23#include <linux/async.h>
24#include <linux/wait.h>
25#include <linux/list.h>
26#include <linux/bitops.h>
27#include <linux/module.h>
28#include <linux/io.h>
29#include <linux/kvm_para.h>
30#include <linux/notifier.h>
31#include <asm/setup.h>
32#include <asm/irq.h>
33#include <asm/cio.h>
34#include <asm/ccwdev.h>
35#include <asm/virtio-ccw.h>
36#include <asm/isc.h>
37#include <asm/airq.h>
38
39/*
40 * virtio related functions
41 */
42
43struct vq_config_block {
44 __u16 index;
45 __u16 num;
46} __packed;
47
48#define VIRTIO_CCW_CONFIG_SIZE 0x100
49/* same as PCI config space size, should be enough for all drivers */
50
51struct virtio_ccw_device {
52 struct virtio_device vdev;
53 __u8 *status;
54 __u8 config[VIRTIO_CCW_CONFIG_SIZE];
55 struct ccw_device *cdev;
56 __u32 curr_io;
57 int err;
58 unsigned int revision; /* Transport revision */
59 wait_queue_head_t wait_q;
60 spinlock_t lock;
61 struct list_head virtqueues;
62 unsigned long indicators;
63 unsigned long indicators2;
64 struct vq_config_block *config_block;
65 bool is_thinint;
66 bool going_away;
67 bool device_lost;
68 void *airq_info;
69};
70
71struct vq_info_block_legacy {
72 __u64 queue;
73 __u32 align;
74 __u16 index;
75 __u16 num;
76} __packed;
77
78struct vq_info_block {
79 __u64 desc;
80 __u32 res0;
81 __u16 index;
82 __u16 num;
83 __u64 avail;
84 __u64 used;
85} __packed;
86
87struct virtio_feature_desc {
88 __u32 features;
89 __u8 index;
90} __packed;
91
92struct virtio_thinint_area {
93 unsigned long summary_indicator;
94 unsigned long indicator;
95 u64 bit_nr;
96 u8 isc;
97} __packed;
98
99struct virtio_rev_info {
100 __u16 revision;
101 __u16 length;
102 __u8 data[];
103};
104
105/* the highest virtio-ccw revision we support */
106#define VIRTIO_CCW_REV_MAX 1
107
108struct virtio_ccw_vq_info {
109 struct virtqueue *vq;
110 int num;
111 void *queue;
112 union {
113 struct vq_info_block s;
114 struct vq_info_block_legacy l;
115 } *info_block;
116 int bit_nr;
117 struct list_head node;
118 long cookie;
119};
120
121#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */
122
123#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
124#define MAX_AIRQ_AREAS 20
125
126static int virtio_ccw_use_airq = 1;
127
128struct airq_info {
129 rwlock_t lock;
130 u8 summary_indicator;
131 struct airq_struct airq;
132 struct airq_iv *aiv;
133};
134static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
135
136#define CCW_CMD_SET_VQ 0x13
137#define CCW_CMD_VDEV_RESET 0x33
138#define CCW_CMD_SET_IND 0x43
139#define CCW_CMD_SET_CONF_IND 0x53
140#define CCW_CMD_READ_FEAT 0x12
141#define CCW_CMD_WRITE_FEAT 0x11
142#define CCW_CMD_READ_CONF 0x22
143#define CCW_CMD_WRITE_CONF 0x21
144#define CCW_CMD_WRITE_STATUS 0x31
145#define CCW_CMD_READ_VQ_CONF 0x32
146#define CCW_CMD_SET_IND_ADAPTER 0x73
147#define CCW_CMD_SET_VIRTIO_REV 0x83
148
149#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
150#define VIRTIO_CCW_DOING_RESET 0x00040000
151#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
152#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
153#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
154#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
155#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
156#define VIRTIO_CCW_DOING_SET_IND 0x01000000
157#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
158#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
159#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
160#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
161#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
162
163static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
164{
165 return container_of(vdev, struct virtio_ccw_device, vdev);
166}
167
168static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
169{
170 unsigned long i, flags;
171
172 write_lock_irqsave(&info->lock, flags);
173 for (i = 0; i < airq_iv_end(info->aiv); i++) {
174 if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
175 airq_iv_free_bit(info->aiv, i);
176 airq_iv_set_ptr(info->aiv, i, 0);
177 break;
178 }
179 }
180 write_unlock_irqrestore(&info->lock, flags);
181}
182
183static void virtio_airq_handler(struct airq_struct *airq)
184{
185 struct airq_info *info = container_of(airq, struct airq_info, airq);
186 unsigned long ai;
187
188 inc_irq_stat(IRQIO_VAI);
189 read_lock(&info->lock);
190 /* Walk through indicators field, summary indicator active. */
191 for (ai = 0;;) {
192 ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
193 if (ai == -1UL)
194 break;
195 vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
196 }
197 info->summary_indicator = 0;
198 smp_wmb();
199 /* Walk through indicators field, summary indicator not active. */
200 for (ai = 0;;) {
201 ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
202 if (ai == -1UL)
203 break;
204 vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
205 }
206 read_unlock(&info->lock);
207}
208
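/*
 * Adapter (thin) interrupt bookkeeping, summarizing the code below: each
 * airq_info provides VIRTIO_IV_BITS indicator bits behind a single summary
 * indicator. Areas are created on demand in get_airq_indicator(), up to
 * MAX_AIRQ_AREAS. Every virtqueue using adapter interrupts owns one bit,
 * whose pointer slot refers back to the virtqueue so virtio_airq_handler()
 * can dispatch to it.
 */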
209static struct airq_info *new_airq_info(void)
210{
211 struct airq_info *info;
212 int rc;
213
214 info = kzalloc(sizeof(*info), GFP_KERNEL);
215 if (!info)
216 return NULL;
217 rwlock_init(&info->lock);
218 info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR);
219 if (!info->aiv) {
220 kfree(info);
221 return NULL;
222 }
223 info->airq.handler = virtio_airq_handler;
224 info->airq.lsi_ptr = &info->summary_indicator;
225 info->airq.lsi_mask = 0xff;
226 info->airq.isc = VIRTIO_AIRQ_ISC;
227 rc = register_adapter_interrupt(&info->airq);
228 if (rc) {
229 airq_iv_release(info->aiv);
230 kfree(info);
231 return NULL;
232 }
233 return info;
234}
235
236static void destroy_airq_info(struct airq_info *info)
237{
238 if (!info)
239 return;
240
241 unregister_adapter_interrupt(&info->airq);
242 airq_iv_release(info->aiv);
243 kfree(info);
244}
245
246static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
247 u64 *first, void **airq_info)
248{
249 int i, j;
250 struct airq_info *info;
251 unsigned long indicator_addr = 0;
252 unsigned long bit, flags;
253
254 for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
255 if (!airq_areas[i])
256 airq_areas[i] = new_airq_info();
257 info = airq_areas[i];
258 if (!info)
259 return 0;
260 write_lock_irqsave(&info->lock, flags);
261 bit = airq_iv_alloc(info->aiv, nvqs);
262 if (bit == -1UL) {
263 /* Not enough vacancies. */
264 write_unlock_irqrestore(&info->lock, flags);
265 continue;
266 }
267 *first = bit;
268 *airq_info = info;
269 indicator_addr = (unsigned long)info->aiv->vector;
270 for (j = 0; j < nvqs; j++) {
271 airq_iv_set_ptr(info->aiv, bit + j,
272 (unsigned long)vqs[j]);
273 }
274 write_unlock_irqrestore(&info->lock, flags);
275 }
276 return indicator_addr;
277}
278
279static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
280{
281 struct virtio_ccw_vq_info *info;
282
283 list_for_each_entry(info, &vcdev->virtqueues, node)
284 drop_airq_indicator(info->vq, vcdev->airq_info);
285}
286
287static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
288{
289 unsigned long flags;
290 __u32 ret;
291
292 spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
293 if (vcdev->err)
294 ret = 0;
295 else
296 ret = vcdev->curr_io & flag;
297 spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
298 return ret;
299}
300
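/*
 * All channel programs in this driver go through ccw_io_helper(): the upper
 * half of the intparm carries a VIRTIO_CCW_DOING_* flag naming the operation
 * in flight, ccw_device_start() is retried while it returns -EBUSY, and the
 * helper then sleeps on wait_q until the interrupt handler clears the flag
 * from curr_io.
 */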
301static int ccw_io_helper(struct virtio_ccw_device *vcdev,
302 struct ccw1 *ccw, __u32 intparm)
303{
304 int ret;
305 unsigned long flags;
306 int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
307
308 do {
309 spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
310 ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
311 if (!ret) {
312 if (!vcdev->curr_io)
313 vcdev->err = 0;
314 vcdev->curr_io |= flag;
315 }
316 spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
317 cpu_relax();
318 } while (ret == -EBUSY);
319 wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
320 return ret ? ret : vcdev->err;
321}
322
323static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
324 struct ccw1 *ccw)
325{
326 int ret;
327 unsigned long *indicatorp = NULL;
328 struct virtio_thinint_area *thinint_area = NULL;
329 struct airq_info *airq_info = vcdev->airq_info;
330
331 if (vcdev->is_thinint) {
332 thinint_area = kzalloc(sizeof(*thinint_area),
333 GFP_DMA | GFP_KERNEL);
334 if (!thinint_area)
335 return;
336 thinint_area->summary_indicator =
337 (unsigned long) &airq_info->summary_indicator;
338 thinint_area->isc = VIRTIO_AIRQ_ISC;
339 ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
340 ccw->count = sizeof(*thinint_area);
341 ccw->cda = (__u32)(unsigned long) thinint_area;
342 } else {
343 indicatorp = kmalloc(sizeof(&vcdev->indicators),
344 GFP_DMA | GFP_KERNEL);
345 if (!indicatorp)
346 return;
347 *indicatorp = 0;
348 ccw->cmd_code = CCW_CMD_SET_IND;
349 ccw->count = sizeof(vcdev->indicators);
350 ccw->cda = (__u32)(unsigned long) indicatorp;
351 }
352 /* Deregister indicators from host. */
353 vcdev->indicators = 0;
354 ccw->flags = 0;
355 ret = ccw_io_helper(vcdev, ccw,
356 vcdev->is_thinint ?
357 VIRTIO_CCW_DOING_SET_IND_ADAPTER :
358 VIRTIO_CCW_DOING_SET_IND);
359 if (ret && (ret != -ENODEV))
360 dev_info(&vcdev->cdev->dev,
361 "Failed to deregister indicators (%d)\n", ret);
362 else if (vcdev->is_thinint)
363 virtio_ccw_drop_indicators(vcdev);
364 kfree(indicatorp);
365 kfree(thinint_area);
366}
367
368static inline long do_kvm_notify(struct subchannel_id schid,
369 unsigned long queue_index,
370 long cookie)
371{
372 register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
373 register struct subchannel_id __schid asm("2") = schid;
374 register unsigned long __index asm("3") = queue_index;
375 register long __rc asm("2");
376 register long __cookie asm("4") = cookie;
377
378 asm volatile ("diag 2,4,0x500\n"
379 : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index),
380 "d"(__cookie)
381 : "memory", "cc");
382 return __rc;
383}
384
385static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
386{
387 struct virtio_ccw_vq_info *info = vq->priv;
388 struct virtio_ccw_device *vcdev;
389 struct subchannel_id schid;
390
391 vcdev = to_vc_device(info->vq->vdev);
392 ccw_device_get_schid(vcdev->cdev, &schid);
393 info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
394 if (info->cookie < 0)
395 return false;
396 return true;
397}
398
399static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
400 struct ccw1 *ccw, int index)
401{
402 vcdev->config_block->index = index;
403 ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
404 ccw->flags = 0;
405 ccw->count = sizeof(struct vq_config_block);
406 ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
407 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
408 return vcdev->config_block->num;
409}
410
411static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
412{
413 struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
414 struct virtio_ccw_vq_info *info = vq->priv;
415 unsigned long flags;
416 unsigned long size;
417 int ret;
418 unsigned int index = vq->index;
419
420 /* Remove from our list. */
421 spin_lock_irqsave(&vcdev->lock, flags);
422 list_del(&info->node);
423 spin_unlock_irqrestore(&vcdev->lock, flags);
424
425 /* Release from host. */
426 if (vcdev->revision == 0) {
427 info->info_block->l.queue = 0;
428 info->info_block->l.align = 0;
429 info->info_block->l.index = index;
430 info->info_block->l.num = 0;
431 ccw->count = sizeof(info->info_block->l);
432 } else {
433 info->info_block->s.desc = 0;
434 info->info_block->s.index = index;
435 info->info_block->s.num = 0;
436 info->info_block->s.avail = 0;
437 info->info_block->s.used = 0;
438 ccw->count = sizeof(info->info_block->s);
439 }
440 ccw->cmd_code = CCW_CMD_SET_VQ;
441 ccw->flags = 0;
442 ccw->cda = (__u32)(unsigned long)(info->info_block);
443 ret = ccw_io_helper(vcdev, ccw,
444 VIRTIO_CCW_DOING_SET_VQ | index);
445 /*
446 * -ENODEV isn't considered an error: The device is gone anyway.
447 * This may happen on device detach.
448 */
449 if (ret && (ret != -ENODEV))
450 dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
451 ret, index);
452
453 vring_del_virtqueue(vq);
454 size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
455 free_pages_exact(info->queue, size);
456 kfree(info->info_block);
457 kfree(info);
458}
459
460static void virtio_ccw_del_vqs(struct virtio_device *vdev)
461{
462 struct virtqueue *vq, *n;
463 struct ccw1 *ccw;
464 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
465
466 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
467 if (!ccw)
468 return;
469
470 virtio_ccw_drop_indicator(vcdev, ccw);
471
472 list_for_each_entry_safe(vq, n, &vdev->vqs, list)
473 virtio_ccw_del_vq(vq, ccw);
474
475 kfree(ccw);
476}
477
478static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
479 int i, vq_callback_t *callback,
480 const char *name,
481 struct ccw1 *ccw)
482{
483 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
484 int err;
485 struct virtqueue *vq = NULL;
486 struct virtio_ccw_vq_info *info;
487 unsigned long size = 0; /* silence the compiler */
488 unsigned long flags;
489
490 /* Allocate queue. */
491 info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
492 if (!info) {
493 dev_warn(&vcdev->cdev->dev, "no info\n");
494 err = -ENOMEM;
495 goto out_err;
496 }
497 info->info_block = kzalloc(sizeof(*info->info_block),
498 GFP_DMA | GFP_KERNEL);
499 if (!info->info_block) {
500 dev_warn(&vcdev->cdev->dev, "no info block\n");
501 err = -ENOMEM;
502 goto out_err;
503 }
504 info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
505 size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
506 info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
507 if (info->queue == NULL) {
508 dev_warn(&vcdev->cdev->dev, "no queue\n");
509 err = -ENOMEM;
510 goto out_err;
511 }
512
513 vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
514 true, info->queue, virtio_ccw_kvm_notify,
515 callback, name);
516 if (!vq) {
517 /* For now, we fail if we can't get the requested size. */
518 dev_warn(&vcdev->cdev->dev, "no vq\n");
519 err = -ENOMEM;
520 goto out_err;
521 }
522
523 /* Register it with the host. */
524 if (vcdev->revision == 0) {
525 info->info_block->l.queue = (__u64)info->queue;
526 info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
527 info->info_block->l.index = i;
528 info->info_block->l.num = info->num;
529 ccw->count = sizeof(info->info_block->l);
530 } else {
531 info->info_block->s.desc = (__u64)info->queue;
532 info->info_block->s.index = i;
533 info->info_block->s.num = info->num;
534 info->info_block->s.avail = (__u64)virtqueue_get_avail(vq);
535 info->info_block->s.used = (__u64)virtqueue_get_used(vq);
536 ccw->count = sizeof(info->info_block->s);
537 }
538 ccw->cmd_code = CCW_CMD_SET_VQ;
539 ccw->flags = 0;
540 ccw->cda = (__u32)(unsigned long)(info->info_block);
541 err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
542 if (err) {
543 dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
544 goto out_err;
545 }
546
547 info->vq = vq;
548 vq->priv = info;
549
550 /* Save it to our list. */
551 spin_lock_irqsave(&vcdev->lock, flags);
552 list_add(&info->node, &vcdev->virtqueues);
553 spin_unlock_irqrestore(&vcdev->lock, flags);
554
555 return vq;
556
557out_err:
558 if (vq)
559 vring_del_virtqueue(vq);
560 if (info) {
561 if (info->queue)
562 free_pages_exact(info->queue, size);
563 kfree(info->info_block);
564 }
565 kfree(info);
566 return ERR_PTR(err);
567}
568
569static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
570 struct virtqueue *vqs[], int nvqs,
571 struct ccw1 *ccw)
572{
573 int ret;
574 struct virtio_thinint_area *thinint_area = NULL;
575 struct airq_info *info;
576
577 thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL);
578 if (!thinint_area) {
579 ret = -ENOMEM;
580 goto out;
581 }
582 /* Try to get an indicator. */
583 thinint_area->indicator = get_airq_indicator(vqs, nvqs,
584 &thinint_area->bit_nr,
585 &vcdev->airq_info);
586 if (!thinint_area->indicator) {
587 ret = -ENOSPC;
588 goto out;
589 }
590 info = vcdev->airq_info;
591 thinint_area->summary_indicator =
592 (unsigned long) &info->summary_indicator;
593 thinint_area->isc = VIRTIO_AIRQ_ISC;
594 ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
595 ccw->flags = CCW_FLAG_SLI;
596 ccw->count = sizeof(*thinint_area);
597 ccw->cda = (__u32)(unsigned long)thinint_area;
598 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
599 if (ret) {
600 if (ret == -EOPNOTSUPP) {
601 /*
602 * The host does not support adapter interrupts
603 * for virtio-ccw, stop trying.
604 */
605 virtio_ccw_use_airq = 0;
606 pr_info("Adapter interrupts unsupported on host\n");
607 } else
608 dev_warn(&vcdev->cdev->dev,
609 "enabling adapter interrupts = %d\n", ret);
610 virtio_ccw_drop_indicators(vcdev);
611 }
612out:
613 kfree(thinint_area);
614 return ret;
615}
616
617static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
618 struct virtqueue *vqs[],
619 vq_callback_t *callbacks[],
620 const char *names[])
621{
622 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
623 unsigned long *indicatorp = NULL;
624 int ret, i;
625 struct ccw1 *ccw;
626
627 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
628 if (!ccw)
629 return -ENOMEM;
630
631 for (i = 0; i < nvqs; ++i) {
632 vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
633 ccw);
634 if (IS_ERR(vqs[i])) {
635 ret = PTR_ERR(vqs[i]);
636 vqs[i] = NULL;
637 goto out;
638 }
639 }
640 ret = -ENOMEM;
641 /* We need a data area under 2G to communicate. */
642 indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
643 if (!indicatorp)
644 goto out;
645 *indicatorp = (unsigned long) &vcdev->indicators;
646 if (vcdev->is_thinint) {
647 ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
648 if (ret)
649 /* no error, just fall back to legacy interrupts */
650 vcdev->is_thinint = 0;
651 }
652 if (!vcdev->is_thinint) {
653 /* Register queue indicators with host. */
654 vcdev->indicators = 0;
655 ccw->cmd_code = CCW_CMD_SET_IND;
656 ccw->flags = 0;
657 ccw->count = sizeof(vcdev->indicators);
658 ccw->cda = (__u32)(unsigned long) indicatorp;
659 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
660 if (ret)
661 goto out;
662 }
663 /* Register indicators2 with host for config changes */
664 *indicatorp = (unsigned long) &vcdev->indicators2;
665 vcdev->indicators2 = 0;
666 ccw->cmd_code = CCW_CMD_SET_CONF_IND;
667 ccw->flags = 0;
668 ccw->count = sizeof(vcdev->indicators2);
669 ccw->cda = (__u32)(unsigned long) indicatorp;
670 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
671 if (ret)
672 goto out;
673
674 kfree(indicatorp);
675 kfree(ccw);
676 return 0;
677out:
678 kfree(indicatorp);
679 kfree(ccw);
680 virtio_ccw_del_vqs(vdev);
681 return ret;
682}
683
684static void virtio_ccw_reset(struct virtio_device *vdev)
685{
686 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
687 struct ccw1 *ccw;
688
689 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
690 if (!ccw)
691 return;
692
693 /* Zero status bits. */
694 *vcdev->status = 0;
695
696 /* Send a reset ccw on device. */
697 ccw->cmd_code = CCW_CMD_VDEV_RESET;
698 ccw->flags = 0;
699 ccw->count = 0;
700 ccw->cda = 0;
701 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
702 kfree(ccw);
703}
704
705static u64 virtio_ccw_get_features(struct virtio_device *vdev)
706{
707 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
708 struct virtio_feature_desc *features;
709 int ret;
710 u64 rc;
711 struct ccw1 *ccw;
712
713 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
714 if (!ccw)
715 return 0;
716
717 features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
718 if (!features) {
719 rc = 0;
720 goto out_free;
721 }
722 /* Read the feature bits from the host. */
723 features->index = 0;
724 ccw->cmd_code = CCW_CMD_READ_FEAT;
725 ccw->flags = 0;
726 ccw->count = sizeof(*features);
727 ccw->cda = (__u32)(unsigned long)features;
728 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
729 if (ret) {
730 rc = 0;
731 goto out_free;
732 }
733
734 rc = le32_to_cpu(features->features);
735
736 if (vcdev->revision == 0)
737 goto out_free;
738
739 /* Read second half of the feature bits from the host. */
740 features->index = 1;
741 ccw->cmd_code = CCW_CMD_READ_FEAT;
742 ccw->flags = 0;
743 ccw->count = sizeof(*features);
744 ccw->cda = (__u32)(unsigned long)features;
745 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
746 if (ret == 0)
747 rc |= (u64)le32_to_cpu(features->features) << 32;
748
749out_free:
750 kfree(features);
751 kfree(ccw);
752 return rc;
753}
754
755static void virtio_ccw_finalize_features(struct virtio_device *vdev)
756{
757 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
758 struct virtio_feature_desc *features;
759 struct ccw1 *ccw;
760
761 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
762 if (!ccw)
763 return;
764
765 features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
766 if (!features)
767 goto out_free;
768
769 /* Give virtio_ring a chance to accept features. */
770 vring_transport_features(vdev);
771
772 features->index = 0;
773 features->features = cpu_to_le32((u32)vdev->features);
774 /* Write the first half of the feature bits to the host. */
775 ccw->cmd_code = CCW_CMD_WRITE_FEAT;
776 ccw->flags = 0;
777 ccw->count = sizeof(*features);
778 ccw->cda = (__u32)(unsigned long)features;
779 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
780
781 if (vcdev->revision == 0)
782 goto out_free;
783
784 features->index = 1;
785 features->features = cpu_to_le32(vdev->features >> 32);
786 /* Write the second half of the feature bits to the host. */
787 ccw->cmd_code = CCW_CMD_WRITE_FEAT;
788 ccw->flags = 0;
789 ccw->count = sizeof(*features);
790 ccw->cda = (__u32)(unsigned long)features;
791 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
792
793out_free:
794 kfree(features);
795 kfree(ccw);
796}
797
798static void virtio_ccw_get_config(struct virtio_device *vdev,
799 unsigned int offset, void *buf, unsigned len)
800{
801 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
802 int ret;
803 struct ccw1 *ccw;
804 void *config_area;
805
806 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
807 if (!ccw)
808 return;
809
810 config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
811 if (!config_area)
812 goto out_free;
813
814 /* Read the config area from the host. */
815 ccw->cmd_code = CCW_CMD_READ_CONF;
816 ccw->flags = 0;
817 ccw->count = offset + len;
818 ccw->cda = (__u32)(unsigned long)config_area;
819 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
820 if (ret)
821 goto out_free;
822
823 memcpy(vcdev->config, config_area, sizeof(vcdev->config));
824 memcpy(buf, &vcdev->config[offset], len);
825
826out_free:
827 kfree(config_area);
828 kfree(ccw);
829}
830
831static void virtio_ccw_set_config(struct virtio_device *vdev,
832 unsigned int offset, const void *buf,
833 unsigned len)
834{
835 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
836 struct ccw1 *ccw;
837 void *config_area;
838
839 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
840 if (!ccw)
841 return;
842
843 config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
844 if (!config_area)
845 goto out_free;
846
847 memcpy(&vcdev->config[offset], buf, len);
848 /* Write the config area to the host. */
849 memcpy(config_area, vcdev->config, sizeof(vcdev->config));
850 ccw->cmd_code = CCW_CMD_WRITE_CONF;
851 ccw->flags = 0;
852 ccw->count = offset + len;
853 ccw->cda = (__u32)(unsigned long)config_area;
854 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);
855
856out_free:
857 kfree(config_area);
858 kfree(ccw);
859}
860
861static u8 virtio_ccw_get_status(struct virtio_device *vdev)
862{
863 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
864
865 return *vcdev->status;
866}
867
868static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
869{
870 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
871 u8 old_status = *vcdev->status;
872 struct ccw1 *ccw;
873 int ret;
874
875 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
876 if (!ccw)
877 return;
878
879 /* Write the status to the host. */
880 *vcdev->status = status;
881 ccw->cmd_code = CCW_CMD_WRITE_STATUS;
882 ccw->flags = 0;
883 ccw->count = sizeof(status);
884 ccw->cda = (__u32)(unsigned long)vcdev->status;
885 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
886 /* Write failed? We assume status is unchanged. */
887 if (ret)
888 *vcdev->status = old_status;
889 kfree(ccw);
890}
891
892static struct virtio_config_ops virtio_ccw_config_ops = {
893 .get_features = virtio_ccw_get_features,
894 .finalize_features = virtio_ccw_finalize_features,
895 .get = virtio_ccw_get_config,
896 .set = virtio_ccw_set_config,
897 .get_status = virtio_ccw_get_status,
898 .set_status = virtio_ccw_set_status,
899 .reset = virtio_ccw_reset,
900 .find_vqs = virtio_ccw_find_vqs,
901 .del_vqs = virtio_ccw_del_vqs,
902};
903
904
905/*
906 * ccw bus driver related functions
907 */
908
909static void virtio_ccw_release_dev(struct device *_d)
910{
911 struct virtio_device *dev = container_of(_d, struct virtio_device,
912 dev);
913 struct virtio_ccw_device *vcdev = to_vc_device(dev);
914
915 kfree(vcdev->status);
916 kfree(vcdev->config_block);
917 kfree(vcdev);
918}
919
920static int irb_is_error(struct irb *irb)
921{
922 if (scsw_cstat(&irb->scsw) != 0)
923 return 1;
924 if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
925 return 1;
926 if (scsw_cc(&irb->scsw) != 0)
927 return 1;
928 return 0;
929}
930
931static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
932 int index)
933{
934 struct virtio_ccw_vq_info *info;
935 unsigned long flags;
936 struct virtqueue *vq;
937
938 vq = NULL;
939 spin_lock_irqsave(&vcdev->lock, flags);
940 list_for_each_entry(info, &vcdev->virtqueues, node) {
941 if (info->vq->index == index) {
942 vq = info->vq;
943 break;
944 }
945 }
946 spin_unlock_irqrestore(&vcdev->lock, flags);
947 return vq;
948}
949
950static void virtio_ccw_int_handler(struct ccw_device *cdev,
951 unsigned long intparm,
952 struct irb *irb)
953{
954 __u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
955 struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
956 int i;
957 struct virtqueue *vq;
958
959 if (!vcdev)
960 return;
961 /* Check if it's a notification from the host. */
962 if ((intparm == 0) &&
963 (scsw_stctl(&irb->scsw) ==
964 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
965 /* OK */
966 }
967 if (irb_is_error(irb)) {
968 /* Command reject? */
969 if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
970 (irb->ecw[0] & SNS0_CMD_REJECT))
971 vcdev->err = -EOPNOTSUPP;
972 else
973 /* Map everything else to -EIO. */
974 vcdev->err = -EIO;
975 }
976 if (vcdev->curr_io & activity) {
977 switch (activity) {
978 case VIRTIO_CCW_DOING_READ_FEAT:
979 case VIRTIO_CCW_DOING_WRITE_FEAT:
980 case VIRTIO_CCW_DOING_READ_CONFIG:
981 case VIRTIO_CCW_DOING_WRITE_CONFIG:
982 case VIRTIO_CCW_DOING_WRITE_STATUS:
983 case VIRTIO_CCW_DOING_SET_VQ:
984 case VIRTIO_CCW_DOING_SET_IND:
985 case VIRTIO_CCW_DOING_SET_CONF_IND:
986 case VIRTIO_CCW_DOING_RESET:
987 case VIRTIO_CCW_DOING_READ_VQ_CONF:
988 case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
989 case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
990 vcdev->curr_io &= ~activity;
991 wake_up(&vcdev->wait_q);
992 break;
993 default:
994 /* don't know what to do... */
995 dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
996 activity);
997 WARN_ON(1);
998 break;
999 }
1000 }
1001 for_each_set_bit(i, &vcdev->indicators,
1002 sizeof(vcdev->indicators) * BITS_PER_BYTE) {
1003 /* The bit clear must happen before the vring kick. */
1004 clear_bit(i, &vcdev->indicators);
1005 barrier();
1006 vq = virtio_ccw_vq_by_ind(vcdev, i);
1007 vring_interrupt(0, vq);
1008 }
1009 if (test_bit(0, &vcdev->indicators2)) {
1010 virtio_config_changed(&vcdev->vdev);
1011 clear_bit(0, &vcdev->indicators2);
1012 }
1013}
1014
1015/*
1016 * We usually want to autoonline all devices, but give the admin
1017 * a way to exempt devices from this.
1018 */
1019#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
1020 (8*sizeof(long)))
1021static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];
1022
1023static char *no_auto = "";
1024
1025module_param(no_auto, charp, 0444);
1026MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");
1027
1028static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
1029{
1030 struct ccw_dev_id id;
1031
1032 ccw_device_get_id(cdev, &id);
1033 if (test_bit(id.devno, devs_no_auto[id.ssid]))
1034 return 0;
1035 return 1;
1036}
1037
1038static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
1039{
1040 struct ccw_device *cdev = data;
1041 int ret;
1042
1043 ret = ccw_device_set_online(cdev);
1044 if (ret)
1045 dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
1046}
1047
1048static int virtio_ccw_probe(struct ccw_device *cdev)
1049{
1050 cdev->handler = virtio_ccw_int_handler;
1051
1052 if (virtio_ccw_check_autoonline(cdev))
1053 async_schedule(virtio_ccw_auto_online, cdev);
1054 return 0;
1055}
1056
1057static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
1058{
1059 unsigned long flags;
1060 struct virtio_ccw_device *vcdev;
1061
1062 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1063 vcdev = dev_get_drvdata(&cdev->dev);
1064 if (!vcdev || vcdev->going_away) {
1065 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1066 return NULL;
1067 }
1068 vcdev->going_away = true;
1069 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1070 return vcdev;
1071}
1072
1073static void virtio_ccw_remove(struct ccw_device *cdev)
1074{
1075 unsigned long flags;
1076 struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
1077
1078 if (vcdev && cdev->online) {
1079 if (vcdev->device_lost)
1080 virtio_break_device(&vcdev->vdev);
1081 unregister_virtio_device(&vcdev->vdev);
1082 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1083 dev_set_drvdata(&cdev->dev, NULL);
1084 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1085 }
1086 cdev->handler = NULL;
1087}
1088
1089static int virtio_ccw_offline(struct ccw_device *cdev)
1090{
1091 unsigned long flags;
1092 struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
1093
1094 if (!vcdev)
1095 return 0;
1096 if (vcdev->device_lost)
1097 virtio_break_device(&vcdev->vdev);
1098 unregister_virtio_device(&vcdev->vdev);
1099 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1100 dev_set_drvdata(&cdev->dev, NULL);
1101 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1102 return 0;
1103}
1104
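/*
 * Revision negotiation as implemented below: propose VIRTIO_CCW_REV_MAX via
 * CCW_CMD_SET_VIRTIO_REV and step down one revision on each command reject
 * (-EOPNOTSUPP). A host that rejects even revision 0 does not know the
 * command at all and is treated as a legacy device; revision 0 is also what
 * makes virtio_ccw_get_features()/virtio_ccw_finalize_features() skip the
 * second 32-bit feature word.
 */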
1105static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
1106{
1107 struct virtio_rev_info *rev;
1108 struct ccw1 *ccw;
1109 int ret;
1110
1111 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
1112 if (!ccw)
1113 return -ENOMEM;
1114 rev = kzalloc(sizeof(*rev), GFP_DMA | GFP_KERNEL);
1115 if (!rev) {
1116 kfree(ccw);
1117 return -ENOMEM;
1118 }
1119
1120 /* Set transport revision */
1121 ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
1122 ccw->flags = 0;
1123 ccw->count = sizeof(*rev);
1124 ccw->cda = (__u32)(unsigned long)rev;
1125
1126 vcdev->revision = VIRTIO_CCW_REV_MAX;
1127 do {
1128 rev->revision = vcdev->revision;
1129 /* none of our supported revisions carry payload */
1130 rev->length = 0;
1131 ret = ccw_io_helper(vcdev, ccw,
1132 VIRTIO_CCW_DOING_SET_VIRTIO_REV);
1133 if (ret == -EOPNOTSUPP) {
1134 if (vcdev->revision == 0)
1135 /*
1136 * The host device does not support setting
1137 * the revision: let's operate it in legacy
1138 * mode.
1139 */
1140 ret = 0;
1141 else
1142 vcdev->revision--;
1143 }
1144 } while (ret == -EOPNOTSUPP);
1145
1146 kfree(ccw);
1147 kfree(rev);
1148 return ret;
1149}
1150
1151static int virtio_ccw_online(struct ccw_device *cdev)
1152{
1153 int ret;
1154 struct virtio_ccw_device *vcdev;
1155 unsigned long flags;
1156
1157 vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
1158 if (!vcdev) {
1159 dev_warn(&cdev->dev, "Could not get memory for virtio\n");
1160 ret = -ENOMEM;
1161 goto out_free;
1162 }
1163 vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
1164 GFP_DMA | GFP_KERNEL);
1165 if (!vcdev->config_block) {
1166 ret = -ENOMEM;
1167 goto out_free;
1168 }
1169 vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
1170 if (!vcdev->status) {
1171 ret = -ENOMEM;
1172 goto out_free;
1173 }
1174
1175 vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */
1176
1177 vcdev->vdev.dev.parent = &cdev->dev;
1178 vcdev->vdev.dev.release = virtio_ccw_release_dev;
1179 vcdev->vdev.config = &virtio_ccw_config_ops;
1180 vcdev->cdev = cdev;
1181 init_waitqueue_head(&vcdev->wait_q);
1182 INIT_LIST_HEAD(&vcdev->virtqueues);
1183 spin_lock_init(&vcdev->lock);
1184
1185 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1186 dev_set_drvdata(&cdev->dev, vcdev);
1187 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1188 vcdev->vdev.id.vendor = cdev->id.cu_type;
1189 vcdev->vdev.id.device = cdev->id.cu_model;
1190
1191 if (virtio_device_is_legacy_only(vcdev->vdev.id)) {
1192 vcdev->revision = 0;
1193 } else {
1194 ret = virtio_ccw_set_transport_rev(vcdev);
1195 if (ret)
1196 goto out_free;
1197 }
1198
1199 ret = register_virtio_device(&vcdev->vdev);
1200 if (ret) {
1201 dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
1202 ret);
1203 goto out_put;
1204 }
1205 return 0;
1206out_put:
1207 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1208 dev_set_drvdata(&cdev->dev, NULL);
1209 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1210 put_device(&vcdev->vdev.dev);
1211 return ret;
1212out_free:
1213 if (vcdev) {
1214 kfree(vcdev->status);
1215 kfree(vcdev->config_block);
1216 }
1217 kfree(vcdev);
1218 return ret;
1219}
1220
1221static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
1222{
1223 int rc;
1224 struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
1225
1226 /*
1227 * Make sure vcdev is set
1228 * i.e. set_offline/remove callback not already running
1229 */
1230 if (!vcdev)
1231 return NOTIFY_DONE;
1232
1233 switch (event) {
1234 case CIO_GONE:
1235 vcdev->device_lost = true;
1236 rc = NOTIFY_DONE;
1237 break;
1238 default:
1239 rc = NOTIFY_DONE;
1240 break;
1241 }
1242 return rc;
1243}
1244
1245static struct ccw_device_id virtio_ids[] = {
1246 { CCW_DEVICE(0x3832, 0) },
1247 {},
1248};
1249MODULE_DEVICE_TABLE(ccw, virtio_ids);
1250
1251static struct ccw_driver virtio_ccw_driver = {
1252 .driver = {
1253 .owner = THIS_MODULE,
1254 .name = "virtio_ccw",
1255 },
1256 .ids = virtio_ids,
1257 .probe = virtio_ccw_probe,
1258 .remove = virtio_ccw_remove,
1259 .set_offline = virtio_ccw_offline,
1260 .set_online = virtio_ccw_online,
1261 .notify = virtio_ccw_cio_notify,
1262 .int_class = IRQIO_VIR,
1263};
1264
1265static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
1266 int max_digit, int max_val)
1267{
1268 int diff;
1269
1270 diff = 0;
1271 *val = 0;
1272
1273 while (diff <= max_digit) {
1274 int value = hex_to_bin(**cp);
1275
1276 if (value < 0)
1277 break;
1278 *val = *val * 16 + value;
1279 (*cp)++;
1280 diff++;
1281 }
1282
1283 if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
1284 return 1;
1285
1286 return 0;
1287}
1288
1289static int __init parse_busid(char *str, unsigned int *cssid,
1290 unsigned int *ssid, unsigned int *devno)
1291{
1292 char *str_work;
1293 int rc, ret;
1294
1295 rc = 1;
1296
1297 if (*str == '\0')
1298 goto out;
1299
1300 str_work = str;
1301 ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
1302 if (ret || (str_work[0] != '.'))
1303 goto out;
1304 str_work++;
1305 ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
1306 if (ret || (str_work[0] != '.'))
1307 goto out;
1308 str_work++;
1309 ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
1310 if (ret || (str_work[0] != '\0'))
1311 goto out;
1312
1313 rc = 0;
1314out:
1315 return rc;
1316}
1317
1318static void __init no_auto_parse(void)
1319{
1320 unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
1321 char *parm, *str;
1322 int rc;
1323
1324 str = no_auto;
1325 while ((parm = strsep(&str, ","))) {
1326 rc = parse_busid(strsep(&parm, "-"), &from_cssid,
1327 &from_ssid, &from);
1328 if (rc)
1329 continue;
1330 if (parm != NULL) {
1331 rc = parse_busid(parm, &to_cssid,
1332 &to_ssid, &to);
1333 if ((from_ssid > to_ssid) ||
1334 ((from_ssid == to_ssid) && (from > to)))
1335 rc = -EINVAL;
1336 } else {
1337 to_cssid = from_cssid;
1338 to_ssid = from_ssid;
1339 to = from;
1340 }
1341 if (rc)
1342 continue;
1343 while ((from_ssid < to_ssid) ||
1344 ((from_ssid == to_ssid) && (from <= to))) {
1345 set_bit(from, devs_no_auto[from_ssid]);
1346 from++;
1347 if (from > __MAX_SUBCHANNEL) {
1348 from_ssid++;
1349 from = 0;
1350 }
1351 }
1352 }
1353}
1354
1355static int __init virtio_ccw_init(void)
1356{
1357 /* parse no_auto string before we do anything further */
1358 no_auto_parse();
1359 return ccw_driver_register(&virtio_ccw_driver);
1360}
1361module_init(virtio_ccw_init);
1362
1363static void __exit virtio_ccw_exit(void)
1364{
1365 int i;
1366
1367 ccw_driver_unregister(&virtio_ccw_driver);
1368 for (i = 0; i < MAX_AIRQ_AREAS; i++)
1369 destroy_airq_info(airq_areas[i]);
1370}
1371module_exit(virtio_ccw_exit);