[SCSI] virtio-scsi: split scatterlist per target
[deliverable/linux.git] / drivers / scsi / virtio_scsi.c
CommitLineData
4fe74b1c
PB
1/*
2 * Virtio SCSI HBA driver
3 *
4 * Copyright IBM Corp. 2010
5 * Copyright Red Hat, Inc. 2011
6 *
7 * Authors:
8 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
9 * Paolo Bonzini <pbonzini@redhat.com>
10 *
11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
12 * See the COPYING file in the top-level directory.
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/slab.h>
18#include <linux/mempool.h>
19#include <linux/virtio.h>
20#include <linux/virtio_ids.h>
21#include <linux/virtio_config.h>
22#include <linux/virtio_scsi.h>
23#include <scsi/scsi_host.h>
24#include <scsi/scsi_device.h>
25#include <scsi/scsi_cmnd.h>
26
27#define VIRTIO_SCSI_MEMPOOL_SZ 64
28
/*
 * Command queue element: per-request context, one per in-flight
 * command/TMF/AN request.  Allocated from virtscsi_cmd_pool.
 */
struct virtio_scsi_cmd {
	/* Midlayer command being serviced (may be NULL, see virtscsi_map_cmd). */
	struct scsi_cmnd *sc;
	/* Set by virtscsi_tmf() so virtscsi_complete_free() can wake the waiter. */
	struct completion *comp;
	/* Request header, device-readable; variant selected by request type. */
	union {
		struct virtio_scsi_cmd_req cmd;
		struct virtio_scsi_ctrl_tmf_req tmf;
		struct virtio_scsi_ctrl_an_req an;
	} req;
	/* Response header, device-writable. */
	union {
		struct virtio_scsi_cmd_resp cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp an;
		struct virtio_scsi_event evt;
	} resp;
} ____cacheline_aligned_in_smp;
45
/* A virtqueue paired with the spinlock that serializes access to it. */
struct virtio_scsi_vq {
	/* Protects vq */
	spinlock_t vq_lock;

	struct virtqueue *vq;
};
52
/* Per-target queue state */
struct virtio_scsi_target_state {
	/* Protects sg. Lock hierarchy is tgt_lock -> vq_lock. */
	spinlock_t tgt_lock;

	/*
	 * For sglist construction when adding commands to the virtqueue.
	 * Sized to seg_max + 2 in virtscsi_alloc_tgt(): data segments plus
	 * the request and response headers.
	 */
	struct scatterlist sg[];
};
61
/* Driver instance state */
struct virtio_scsi {
	struct virtio_device *vdev;

	/* The three virtqueues, in the order find_vqs() returns them. */
	struct virtio_scsi_vq ctrl_vq;
	struct virtio_scsi_vq event_vq;
	struct virtio_scsi_vq req_vq;

	/* Per-target state, indexed by target id; filled in virtscsi_init(). */
	struct virtio_scsi_target_state *tgt[];
};
72
/* Slab cache and mempool backing struct virtio_scsi_cmd allocations. */
static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;
75
/* vdev->priv holds the Scsi_Host; set up in virtscsi_probe(). */
static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
{
	return vdev->priv;
}
80
81static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
82{
83 if (!resid)
84 return;
85
86 if (!scsi_bidi_cmnd(sc)) {
87 scsi_set_resid(sc, resid);
88 return;
89 }
90
91 scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
92 scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
93}
94
/**
 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
 *
 * Translates the virtio-scsi response code into a SCSI host byte,
 * copies back sense data, frees the command slot and completes the
 * midlayer command.
 *
 * Called with vq_lock held.
 */
static void virtscsi_complete_cmd(void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;
	struct scsi_cmnd *sc = cmd->sc;
	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p response %u status %#02x sense_len %u\n",
		sc, resp->response, resp->status, resp->sense_len);

	sc->result = resp->status;
	virtscsi_compute_resid(sc, resp->resid);
	switch (resp->response) {
	case VIRTIO_SCSI_S_OK:
		set_host_byte(sc, DID_OK);
		break;
	case VIRTIO_SCSI_S_OVERRUN:
		set_host_byte(sc, DID_ERROR);
		break;
	case VIRTIO_SCSI_S_ABORTED:
		set_host_byte(sc, DID_ABORT);
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_RESET:
		set_host_byte(sc, DID_RESET);
		break;
	case VIRTIO_SCSI_S_BUSY:
		set_host_byte(sc, DID_BUS_BUSY);
		break;
	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		break;
	case VIRTIO_SCSI_S_TARGET_FAILURE:
		set_host_byte(sc, DID_TARGET_FAILURE);
		break;
	case VIRTIO_SCSI_S_NEXUS_FAILURE:
		set_host_byte(sc, DID_NEXUS_FAILURE);
		break;
	default:
		/* Unknown codes are logged, then treated like S_FAILURE. */
		scmd_printk(KERN_WARNING, sc, "Unknown response %d",
			    resp->response);
		/* fall through */
	case VIRTIO_SCSI_S_FAILURE:
		set_host_byte(sc, DID_ERROR);
		break;
	}

	WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
	if (sc->sense_buffer) {
		memcpy(sc->sense_buffer, resp->sense,
		       min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
		if (resp->sense_len)
			set_driver_byte(sc, DRIVER_SENSE);
	}

	/* The slot is recycled before scsi_done; cmd must not be touched after. */
	mempool_free(cmd, virtscsi_cmd_pool);
	sc->scsi_done(sc);
}
160
/*
 * Drain all completed buffers from @vq, invoking @fn on each.
 *
 * Callbacks are disabled while draining, and the loop repeats whenever
 * virtqueue_enable_cb() reports that buffers arrived in the window
 * between the last get_buf and re-enabling — so no completion is lost.
 *
 * Called with the corresponding vq_lock held.
 */
static void virtscsi_vq_done(struct virtqueue *vq, void (*fn)(void *buf))
{
	void *buf;
	unsigned int len;

	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			fn(buf);
	} while (!virtqueue_enable_cb(vq));
}
172
173static void virtscsi_req_done(struct virtqueue *vq)
174{
139fe45a
PB
175 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
176 struct virtio_scsi *vscsi = shost_priv(sh);
177 unsigned long flags;
178
179 spin_lock_irqsave(&vscsi->req_vq.vq_lock, flags);
4fe74b1c 180 virtscsi_vq_done(vq, virtscsi_complete_cmd);
139fe45a 181 spin_unlock_irqrestore(&vscsi->req_vq.vq_lock, flags);
4fe74b1c
PB
182};
183
184static void virtscsi_complete_free(void *buf)
185{
186 struct virtio_scsi_cmd *cmd = buf;
187
188 if (cmd->comp)
189 complete_all(cmd->comp);
e4594bb5
PB
190 else
191 mempool_free(cmd, virtscsi_cmd_pool);
4fe74b1c
PB
192}
193
194static void virtscsi_ctrl_done(struct virtqueue *vq)
195{
139fe45a
PB
196 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
197 struct virtio_scsi *vscsi = shost_priv(sh);
198 unsigned long flags;
199
200 spin_lock_irqsave(&vscsi->ctrl_vq.vq_lock, flags);
4fe74b1c 201 virtscsi_vq_done(vq, virtscsi_complete_free);
139fe45a 202 spin_unlock_irqrestore(&vscsi->ctrl_vq.vq_lock, flags);
4fe74b1c
PB
203};
204
205static void virtscsi_event_done(struct virtqueue *vq)
206{
139fe45a
PB
207 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
208 struct virtio_scsi *vscsi = shost_priv(sh);
209 unsigned long flags;
210
211 spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
4fe74b1c 212 virtscsi_vq_done(vq, virtscsi_complete_free);
139fe45a 213 spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
4fe74b1c
PB
214};
215
216static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx,
217 struct scsi_data_buffer *sdb)
218{
219 struct sg_table *table = &sdb->table;
220 struct scatterlist *sg_elem;
221 unsigned int idx = *p_idx;
222 int i;
223
224 for_each_sg(table->sgl, sg_elem, table->nents, i)
225 sg_set_buf(&sg[idx++], sg_virt(sg_elem), sg_elem->length);
226
227 *p_idx = idx;
228}
229
/**
 * virtscsi_map_cmd - map a scsi_cmd to a virtqueue scatterlist
 * @vscsi	: virtio_scsi state
 * @cmd		: command structure
 * @out_num	: number of read-only elements
 * @in_num	: number of write-only elements
 * @req_size	: size of the request buffer
 * @resp_size	: size of the response buffer
 *
 * The layout is: request header, data-out (device-readable), then
 * response header, data-in (device-writable).  All readable elements
 * must precede all writable ones, hence out_num counts everything up
 * to the response header.
 *
 * Called with tgt_lock held.
 */
static void virtscsi_map_cmd(struct virtio_scsi_target_state *tgt,
			     struct virtio_scsi_cmd *cmd,
			     unsigned *out_num, unsigned *in_num,
			     size_t req_size, size_t resp_size)
{
	struct scsi_cmnd *sc = cmd->sc;
	struct scatterlist *sg = tgt->sg;
	unsigned int idx = 0;

	/* Request header. */
	sg_set_buf(&sg[idx++], &cmd->req, req_size);

	/* Data-out buffer. */
	if (sc && sc->sc_data_direction != DMA_FROM_DEVICE)
		virtscsi_map_sgl(sg, &idx, scsi_out(sc));

	*out_num = idx;

	/* Response header. */
	sg_set_buf(&sg[idx++], &cmd->resp, resp_size);

	/* Data-in buffer */
	if (sc && sc->sc_data_direction != DMA_TO_DEVICE)
		virtscsi_map_sgl(sg, &idx, scsi_in(sc));

	*in_num = idx - *out_num;
}
268
/*
 * Build the scatterlist for @cmd in @tgt->sg, add it to @vq and notify
 * the device if needed.
 *
 * Lock hierarchy is tgt_lock -> vq_lock (see virtio_scsi_target_state).
 * tgt_lock only protects the shared sg array, so it is dropped as soon
 * as virtqueue_add_buf() has consumed the entries, while vq_lock is
 * kept until the kick has been prepared.  The actual notification runs
 * outside both locks.
 *
 * Returns a negative value if the buffer could not be queued.
 */
static int virtscsi_kick_cmd(struct virtio_scsi_target_state *tgt,
			     struct virtio_scsi_vq *vq,
			     struct virtio_scsi_cmd *cmd,
			     size_t req_size, size_t resp_size, gfp_t gfp)
{
	unsigned int out_num, in_num;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tgt->tgt_lock, flags);
	virtscsi_map_cmd(tgt, cmd, &out_num, &in_num, req_size, resp_size);

	spin_lock(&vq->vq_lock);
	ret = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
	/* sg entries have been copied into the ring; tgt->sg is reusable. */
	spin_unlock(&tgt->tgt_lock);
	if (ret >= 0)
		ret = virtqueue_kick_prepare(vq->vq);

	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (ret > 0)
		virtqueue_notify(vq->vq);
	return ret;
}
293
294static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
295{
296 struct virtio_scsi *vscsi = shost_priv(sh);
2bd37f0f 297 struct virtio_scsi_target_state *tgt = vscsi->tgt[sc->device->id];
4fe74b1c
PB
298 struct virtio_scsi_cmd *cmd;
299 int ret;
300
2bd37f0f
PB
301 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
302 BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
303
304 /* TODO: check feature bit and fail if unsupported? */
305 BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
306
4fe74b1c
PB
307 dev_dbg(&sc->device->sdev_gendev,
308 "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
309
310 ret = SCSI_MLQUEUE_HOST_BUSY;
311 cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC);
312 if (!cmd)
313 goto out;
314
315 memset(cmd, 0, sizeof(*cmd));
316 cmd->sc = sc;
317 cmd->req.cmd = (struct virtio_scsi_cmd_req){
318 .lun[0] = 1,
319 .lun[1] = sc->device->id,
320 .lun[2] = (sc->device->lun >> 8) | 0x40,
321 .lun[3] = sc->device->lun & 0xff,
322 .tag = (unsigned long)sc,
323 .task_attr = VIRTIO_SCSI_S_SIMPLE,
324 .prio = 0,
325 .crn = 0,
326 };
327
328 BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
329 memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
330
2bd37f0f 331 if (virtscsi_kick_cmd(tgt, &vscsi->req_vq, cmd,
4fe74b1c
PB
332 sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
333 GFP_ATOMIC) >= 0)
334 ret = 0;
335
336out:
337 return ret;
338}
339
/*
 * Issue a task-management request on the control queue and wait
 * synchronously for the device's answer.
 *
 * @cmd must come from virtscsi_cmd_pool and is freed here on all
 * paths.  Returns SUCCESS or FAILED as expected by the SCSI EH
 * callbacks.
 */
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	struct virtio_scsi_target_state *tgt = vscsi->tgt[cmd->sc->device->id];
	int ret = FAILED;

	cmd->comp = &comp;
	if (virtscsi_kick_cmd(tgt, &vscsi->ctrl_vq, cmd,
			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
			      GFP_NOIO) < 0)
		goto out;

	/* virtscsi_complete_free() signals comp once the response lands. */
	wait_for_completion(&comp);
	if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
		ret = SUCCESS;

out:
	mempool_free(cmd, virtscsi_cmd_pool);
	return ret;
}
361
362static int virtscsi_device_reset(struct scsi_cmnd *sc)
363{
364 struct virtio_scsi *vscsi = shost_priv(sc->device->host);
365 struct virtio_scsi_cmd *cmd;
366
367 sdev_printk(KERN_INFO, sc->device, "device reset\n");
368 cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
369 if (!cmd)
370 return FAILED;
371
372 memset(cmd, 0, sizeof(*cmd));
373 cmd->sc = sc;
374 cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
375 .type = VIRTIO_SCSI_T_TMF,
376 .subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
377 .lun[0] = 1,
378 .lun[1] = sc->device->id,
379 .lun[2] = (sc->device->lun >> 8) | 0x40,
380 .lun[3] = sc->device->lun & 0xff,
381 };
382 return virtscsi_tmf(vscsi, cmd);
383}
384
385static int virtscsi_abort(struct scsi_cmnd *sc)
386{
387 struct virtio_scsi *vscsi = shost_priv(sc->device->host);
388 struct virtio_scsi_cmd *cmd;
389
390 scmd_printk(KERN_INFO, sc, "abort\n");
391 cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
392 if (!cmd)
393 return FAILED;
394
395 memset(cmd, 0, sizeof(*cmd));
396 cmd->sc = sc;
397 cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
398 .type = VIRTIO_SCSI_T_TMF,
399 .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
400 .lun[0] = 1,
401 .lun[1] = sc->device->id,
402 .lun[2] = (sc->device->lun >> 8) | 0x40,
403 .lun[3] = sc->device->lun & 0xff,
404 .tag = (unsigned long)sc,
405 };
406 return virtscsi_tmf(vscsi, cmd);
407}
408
/* Host template registered with the SCSI midlayer in virtscsi_probe(). */
static struct scsi_host_template virtscsi_host_template = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.queuecommand = virtscsi_queuecommand,
	.this_id = -1,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,

	.can_queue = 1024,
	.dma_boundary = UINT_MAX,
	.use_clustering = ENABLE_CLUSTERING,
};
422
/*
 * Typed read/write accessors for fields of struct virtio_scsi_config
 * in the device's configuration space; typeof keeps the temporary the
 * same size as the field.
 */
#define virtscsi_config_get(vdev, fld) \
	({ \
		typeof(((struct virtio_scsi_config *)0)->fld) __val; \
		vdev->config->get(vdev, \
				  offsetof(struct virtio_scsi_config, fld), \
				  &__val, sizeof(__val)); \
		__val; \
	})

#define virtscsi_config_set(vdev, fld, val) \
	(void)({ \
		typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
		vdev->config->set(vdev, \
				  offsetof(struct virtio_scsi_config, fld), \
				  &__val, sizeof(__val)); \
	})
439
139fe45a
PB
440static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
441 struct virtqueue *vq)
442{
443 spin_lock_init(&virtscsi_vq->vq_lock);
444 virtscsi_vq->vq = vq;
445}
446
2bd37f0f
PB
447static struct virtio_scsi_target_state *virtscsi_alloc_tgt(
448 struct virtio_device *vdev, int sg_elems)
449{
450 struct virtio_scsi_target_state *tgt;
451 gfp_t gfp_mask = GFP_KERNEL;
452
453 /* We need extra sg elements at head and tail. */
454 tgt = kmalloc(sizeof(*tgt) + sizeof(tgt->sg[0]) * (sg_elems + 2),
455 gfp_mask);
456
457 if (!tgt)
458 return NULL;
459
460 spin_lock_init(&tgt->tgt_lock);
461 sg_init_table(tgt->sg, sg_elems + 2);
462 return tgt;
463}
464
465static void virtscsi_remove_vqs(struct virtio_device *vdev)
466{
467 struct Scsi_Host *sh = virtio_scsi_host(vdev);
468 struct virtio_scsi *vscsi = shost_priv(sh);
469 u32 i, num_targets;
470
471 /* Stop all the virtqueues. */
472 vdev->config->reset(vdev);
473
474 num_targets = sh->max_id;
475 for (i = 0; i < num_targets; i++) {
476 kfree(vscsi->tgt[i]);
477 vscsi->tgt[i] = NULL;
478 }
479
480 vdev->config->del_vqs(vdev);
481}
482
/*
 * Discover the virtqueues, push our CDB/sense sizes to the device and
 * allocate per-target scatterlist state.  Called from probe and from
 * restore; on failure everything is torn down via virtscsi_remove_vqs().
 *
 * NOTE(review): the failure path relies on virtscsi_remove_vqs(),
 * which iterates sh->max_id — during probe max_id is assigned only
 * after this function returns; verify the host's initial max_id is 0
 * so no stale tgt pointers are freed.
 */
static int virtscsi_init(struct virtio_device *vdev,
			 struct virtio_scsi *vscsi, int num_targets)
{
	int err;
	struct virtqueue *vqs[3];
	u32 i, sg_elems;

	/* Callback/name order must match the vqs[] indices used below. */
	vq_callback_t *callbacks[] = {
		virtscsi_ctrl_done,
		virtscsi_event_done,
		virtscsi_req_done
	};
	const char *names[] = {
		"control",
		"event",
		"request"
	};

	/* Discover virtqueues and write information to configuration. */
	err = vdev->config->find_vqs(vdev, 3, vqs, callbacks, names);
	if (err)
		return err;

	virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
	virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
	virtscsi_init_vq(&vscsi->req_vq, vqs[2]);

	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);

	/* We need to know how many segments before we allocate. */
	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;

	for (i = 0; i < num_targets; i++) {
		vscsi->tgt[i] = virtscsi_alloc_tgt(vdev, sg_elems);
		if (!vscsi->tgt[i]) {
			err = -ENOMEM;
			goto out;
		}
	}
	err = 0;

out:
	if (err)
		virtscsi_remove_vqs(vdev);
	return err;
}
530
/*
 * Device discovery: allocate the Scsi_Host (with the trailing tgt[]
 * array in its private data), configure limits from virtio config
 * space and register with the SCSI midlayer.
 */
static int __devinit virtscsi_probe(struct virtio_device *vdev)
{
	struct Scsi_Host *shost;
	struct virtio_scsi *vscsi;
	int err;
	u32 sg_elems, num_targets;
	u32 cmd_per_lun;

	/* Allocate memory and link the structs together. */
	num_targets = virtscsi_config_get(vdev, max_target) + 1;
	/*
	 * NOTE(review): tgt[] stores pointers, yet the extra space is
	 * sized with sizeof(struct virtio_scsi_target_state) — larger
	 * than needed, but harmless; confirm intent.
	 */
	shost = scsi_host_alloc(&virtscsi_host_template,
		sizeof(*vscsi)
		+ num_targets * sizeof(struct virtio_scsi_target_state));

	if (!shost)
		return -ENOMEM;

	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
	shost->sg_tablesize = sg_elems;
	vscsi = shost_priv(shost);
	vscsi->vdev = vdev;
	/* vdev->priv is read back by virtio_scsi_host(). */
	vdev->priv = shost;

	err = virtscsi_init(vdev, vscsi, num_targets);
	if (err)
		goto virtscsi_init_failed;

	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1;
	shost->max_id = num_targets;
	shost->max_channel = 0;
	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
	err = scsi_add_host(shost, &vdev->dev);
	if (err)
		goto scsi_add_host_failed;

	scsi_scan_host(shost);

	return 0;

scsi_add_host_failed:
	vdev->config->del_vqs(vdev);
virtscsi_init_failed:
	scsi_host_put(shost);
	return err;
}
579
4fe74b1c
PB
580static void __devexit virtscsi_remove(struct virtio_device *vdev)
581{
582 struct Scsi_Host *shost = virtio_scsi_host(vdev);
583
584 scsi_remove_host(shost);
585
586 virtscsi_remove_vqs(vdev);
587 scsi_host_put(shost);
588}
589
#ifdef CONFIG_PM
/* Suspend: reset the device and free virtqueues plus per-target state. */
static int virtscsi_freeze(struct virtio_device *vdev)
{
	virtscsi_remove_vqs(vdev);
	return 0;
}

/* Resume: rebuild everything that freeze tore down. */
static int virtscsi_restore(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	return virtscsi_init(vdev, vscsi, sh->max_id);
}
#endif
605
/* Match any virtio device advertising the SCSI device ID. */
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_scsi_driver = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtscsi_probe,
#ifdef CONFIG_PM
	.freeze = virtscsi_freeze,
	.restore = virtscsi_restore,
#endif
	.remove = __devexit_p(virtscsi_remove),
};
622
623static int __init init(void)
624{
625 int ret = -ENOMEM;
626
627 virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
628 if (!virtscsi_cmd_cache) {
629 printk(KERN_ERR "kmem_cache_create() for "
630 "virtscsi_cmd_cache failed\n");
631 goto error;
632 }
633
634
635 virtscsi_cmd_pool =
636 mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
637 virtscsi_cmd_cache);
638 if (!virtscsi_cmd_pool) {
639 printk(KERN_ERR "mempool_create() for"
640 "virtscsi_cmd_pool failed\n");
641 goto error;
642 }
643 ret = register_virtio_driver(&virtio_scsi_driver);
644 if (ret < 0)
645 goto error;
646
647 return 0;
648
649error:
650 if (virtscsi_cmd_pool) {
651 mempool_destroy(virtscsi_cmd_pool);
652 virtscsi_cmd_pool = NULL;
653 }
654 if (virtscsi_cmd_cache) {
655 kmem_cache_destroy(virtscsi_cmd_cache);
656 virtscsi_cmd_cache = NULL;
657 }
658 return ret;
659}
660
/* Module exit: unregister first so no new commands can be allocated. */
static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_scsi_driver);
	mempool_destroy(virtscsi_cmd_pool);
	kmem_cache_destroy(virtscsi_cmd_cache);
}
module_init(init);
module_exit(fini);
669
670MODULE_DEVICE_TABLE(virtio, id_table);
671MODULE_DESCRIPTION("Virtio SCSI HBA driver");
672MODULE_LICENSE("GPL");
This page took 0.13309 seconds and 5 git commands to generate.