xen/blk[front|back]: Enhance discard support with secure erasing support.
drivers/block/xen-blkback/blkback.c
/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/loop.h>
#include <linux/falloc.h>
#include <linux/fs.h>

#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int xen_blkif_reqs = 64;
module_param_named(reqs, xen_blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each buffer_head that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
        struct xen_blkif        *blkif;
        u64                     id;
        int                     nr_pages;
        atomic_t                pendcnt;
        unsigned short          operation;
        int                     status;
        struct list_head        free_list;
};

#define BLKBACK_INVALID_HANDLE (~0)

struct xen_blkbk {
        struct pending_req      *pending_reqs;
        /* List of all 'pending_req' available */
        struct list_head        pending_free;
        /* And its spinlock. */
        spinlock_t              pending_free_lock;
        wait_queue_head_t       pending_free_wq;
        /* The list of all pages that are available. */
        struct page             **pending_pages;
        /* And the grant handles that are available. */
        grant_handle_t          *pending_grant_handles;
};

static struct xen_blkbk *blkbk;

/*
 * Little helpful macro to figure out the index and virtual address of the
 * pending_pages[..]. For each 'pending_req' we have up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
 * 10 and would index in the pending_pages[..].
 */
static inline int vaddr_pagenr(struct pending_req *req, int seg)
{
        return (req - blkbk->pending_reqs) *
                BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}
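
/*
 * Worked example (added commentary, not in the original source): with
 * BLKIF_MAX_SEGMENTS_PER_REQUEST == 11, segment 2 of pending_reqs[4] maps to
 * pending_pages[4 * 11 + 2] == pending_pages[46], so every pending_req owns
 * a fixed, contiguous slice of the preallocated page pool.
 */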

#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]

static inline unsigned long vaddr(struct pending_req *req, int seg)
{
        unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
        return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
        (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])

static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
                                struct blkif_request *req,
                                struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
                          unsigned short op, int st);

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(void)
{
        struct pending_req *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&blkbk->pending_free_lock, flags);
        if (!list_empty(&blkbk->pending_free)) {
                req = list_entry(blkbk->pending_free.next, struct pending_req,
                                 free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
        return req;
}

/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free page.
 */
static void free_req(struct pending_req *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&blkbk->pending_free_lock, flags);
        was_empty = list_empty(&blkbk->pending_free);
        list_add(&req->free_list, &blkbk->pending_free);
        spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
        if (was_empty)
                wake_up(&blkbk->pending_free_wq);
}

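/*
 * Note (added commentary): alloc_req() and free_req() implement a simple
 * fixed-size pool. When it runs dry, __do_block_io_op() below bumps
 * st_oo_req and backs off, and xen_blkif_schedule() sleeps on
 * pending_free_wq until the wake_up() above signals a returned request.
 */
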
/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
                             int operation)
{
        struct xen_vbd *vbd = &blkif->vbd;
        int rc = -EACCES;

        if ((operation != READ) && vbd->readonly)
                goto out;

        if (likely(req->nr_sects)) {
                blkif_sector_t end = req->sector_number + req->nr_sects;

                if (unlikely(end < req->sector_number))
                        goto out;
                if (unlikely(end > vbd_sz(vbd)))
                        goto out;
        }

        req->dev  = vbd->pdevice;
        req->bdev = vbd->bdev;
        rc = 0;

 out:
        return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
        struct xen_vbd *vbd = &blkif->vbd;
        struct xenbus_transaction xbt;
        int err;
        struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
        unsigned long long new_size = vbd_sz(vbd);

        pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
                blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
        pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
        vbd->size = new_size;
again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                pr_warn(DRV_PFX "Error starting transaction");
                return;
        }
        err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
                            (unsigned long long)vbd_sz(vbd));
        if (err) {
                pr_warn(DRV_PFX "Error writing new size");
                goto abort;
        }
        /*
         * Write the current state; we will use this to synchronize
         * the front-end. If the current state is "connected" the
         * front-end will get the new size information online.
         */
        err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
        if (err) {
                pr_warn(DRV_PFX "Error writing the state");
                goto abort;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
                goto again;
        if (err)
                pr_warn(DRV_PFX "Error ending transaction");
        return;
abort:
        xenbus_transaction_end(xbt, 1);
}

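/*
 * Note (added commentary): xenbus transactions are optimistic.
 * xenbus_transaction_end(xbt, 0) returns -EAGAIN when another writer raced
 * with this one, in which case the whole transaction is replayed from
 * "again:". The abort path ends the transaction with abort == 1, which
 * discards the partial writes.
 */
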
/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
        blkif->waiting_reqs = 1;
        wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}

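/*
 * Note (added commentary): xen_blkif_be_int() is the event-channel
 * interrupt handler bound to each blkif. It does as little as possible in
 * hard-irq context: flag that requests are waiting and wake the per-device
 * kernel thread running xen_blkif_schedule(), which does the actual ring
 * processing.
 */
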
/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif *blkif)
{
        pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d"
                " | ds %4d\n",
                current->comm, blkif->st_oo_req,
                blkif->st_rd_req, blkif->st_wr_req,
                blkif->st_f_req, blkif->st_ds_req);
        blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        blkif->st_rd_req = 0;
        blkif->st_wr_req = 0;
        blkif->st_oo_req = 0;
        blkif->st_ds_req = 0;
}

int xen_blkif_schedule(void *arg)
{
        struct xen_blkif *blkif = arg;
        struct xen_vbd *vbd = &blkif->vbd;

        xen_blkif_get(blkif);

        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;
                if (unlikely(vbd->size != vbd_sz(vbd)))
                        xen_vbd_resize(blkif);

                wait_event_interruptible(
                        blkif->wq,
                        blkif->waiting_reqs || kthread_should_stop());
                wait_event_interruptible(
                        blkbk->pending_free_wq,
                        !list_empty(&blkbk->pending_free) ||
                        kthread_should_stop());

                blkif->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                if (do_block_io_op(blkif))
                        blkif->waiting_reqs = 1;

                if (log_stats && time_after(jiffies, blkif->st_print))
                        print_stats(blkif);
        }

        if (log_stats)
                print_stats(blkif);

        blkif->xenblkd = NULL;
        xen_blkif_put(blkif);

        return 0;
}

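/*
 * Note (added commentary): clearing waiting_reqs *before* draining the ring
 * (together with the smp_mb() above) closes the lost-wakeup window: if
 * blkif_notify_work() sets the flag again while do_block_io_op() runs, the
 * next loop iteration observes either the flag or the new ring entries,
 * never neither.
 */
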
struct seg_buf {
        unsigned long buf;
        unsigned int nsec;
};
/*
 * Unmap the grant references, and also remove the M2P over-rides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct pending_req *req)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int i, invcount = 0;
        grant_handle_t handle;
        int ret;

        for (i = 0; i < req->nr_pages; i++) {
                handle = pending_handle(req, i);
                if (handle == BLKBACK_INVALID_HANDLE)
                        continue;
                gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
                                    GNTMAP_host_map, handle);
                pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
                invcount++;
        }

        ret = HYPERVISOR_grant_table_op(
                GNTTABOP_unmap_grant_ref, unmap, invcount);
        BUG_ON(ret);
        /*
         * Note, we use invcount, not nr_pages, so we can't index
         * using vaddr(req, i).
         */
        for (i = 0; i < invcount; i++) {
                ret = m2p_remove_override(
                        virt_to_page(unmap[i].host_addr), false);
                if (ret) {
                        pr_alert(DRV_PFX "Failed to remove M2P override for %lx\n",
                                 (unsigned long)unmap[i].host_addr);
                        continue;
                }
        }
}

static int xen_blkbk_map(struct blkif_request *req,
                         struct pending_req *pending_req,
                         struct seg_buf seg[])
{
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        int i;
        int nseg = req->u.rw.nr_segments;
        int ret = 0;

        /*
         * Fill out preq.nr_sects with proper amount of sectors, and
         * assign map[..] with the PFN of the page in our domain with the
         * corresponding grant reference for each page.
         */
        for (i = 0; i < nseg; i++) {
                uint32_t flags;

                flags = GNTMAP_host_map;
                if (pending_req->operation != BLKIF_OP_READ)
                        flags |= GNTMAP_readonly;
                gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
                                  req->u.rw.seg[i].gref,
                                  pending_req->blkif->domid);
        }

        ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
        BUG_ON(ret);

        /*
         * Now swizzle the MFN in our domain with the MFN from the other domain
         * so that when we access vaddr(pending_req,i) it has the contents of
         * the page from the other domain.
         */
        for (i = 0; i < nseg; i++) {
                if (unlikely(map[i].status != 0)) {
                        pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
                        map[i].handle = BLKBACK_INVALID_HANDLE;
                        ret |= 1;
                }

                pending_handle(pending_req, i) = map[i].handle;

                if (ret)
                        continue;

                ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
                        blkbk->pending_page(pending_req, i), NULL);
                if (ret) {
                        pr_alert(DRV_PFX "Failed to install M2P override for %lx (ret: %d)\n",
                                 (unsigned long)map[i].dev_bus_addr, ret);
                        /* We could switch over to GNTTABOP_copy */
                        continue;
                }

                seg[i].buf = map[i].dev_bus_addr |
                        (req->u.rw.seg[i].first_sect << 9);
        }
        return ret;
}

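/*
 * Worked example (added commentary): seg[i].buf packs the page's bus address
 * together with the byte offset of the first sector. For first_sect == 3 the
 * offset is 3 << 9 == 1536 bytes; dispatch_rw_block_io() later recovers the
 * in-page offset as "seg[i].buf & ~PAGE_MASK" when calling bio_add_page().
 */
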
static void xen_blk_discard(struct xen_blkif *blkif, struct blkif_request *req)
{
        int err = 0;
        int status = BLKIF_RSP_OKAY;
        struct block_device *bdev = blkif->vbd.bdev;

        if (blkif->blk_backend_type == BLKIF_BACKEND_PHY) {
                unsigned long secure = (blkif->vbd.discard_secure &&
                        (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
                        BLKDEV_DISCARD_SECURE : 0;
                /* just forward the discard request */
                err = blkdev_issue_discard(bdev,
                                req->u.discard.sector_number,
                                req->u.discard.nr_sectors,
                                GFP_KERNEL, secure);
        } else if (blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
                /* punch a hole in the backing file */
                struct loop_device *lo = bdev->bd_disk->private_data;
                struct file *file = lo->lo_backing_file;

                if (file->f_op->fallocate)
                        err = file->f_op->fallocate(file,
                                FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
                                req->u.discard.sector_number << 9,
                                req->u.discard.nr_sectors << 9);
                else
                        err = -EOPNOTSUPP;
        } else
                err = -EOPNOTSUPP;

        if (err == -EOPNOTSUPP) {
                pr_debug(DRV_PFX "discard op failed, not supported\n");
                status = BLKIF_RSP_EOPNOTSUPP;
        } else if (err)
                status = BLKIF_RSP_ERROR;

        make_response(blkif, req->u.discard.id, req->operation, status);
}

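/*
 * Note (added commentary): this is the heart of the secure-erase
 * enhancement. A secure discard is only issued when both sides agree: the
 * frontend sets BLKIF_DISCARD_SECURE in the request, and vbd->discard_secure
 * records that the underlying device supports it (presumably probed and
 * advertised to the frontend via xenstore in the xenbus.c counterpart, which
 * is not part of this file). Otherwise the request degrades to a plain
 * discard.
 */
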
static void xen_blk_drain_io(struct xen_blkif *blkif)
{
        atomic_set(&blkif->drain, 1);
        do {
                /* The initial value is one, and one refcnt taken at the
                 * start of the xen_blkif_schedule thread. */
                if (atomic_read(&blkif->refcnt) <= 2)
                        break;
                wait_for_completion_interruptible_timeout(
                                &blkif->drain_complete, HZ);

                if (!atomic_read(&blkif->drain))
                        break;
        } while (!kthread_should_stop());
        atomic_set(&blkif->drain, 0);
}

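/*
 * Note (added commentary): every in-flight request holds a reference taken
 * by xen_blkif_get() in dispatch_rw_block_io(), so refcnt <= 2 (the base
 * reference plus the scheduler thread's) means all outstanding I/O has
 * completed and a WRITE_BARRIER can safely be turned into drain-then-flush.
 */
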
/*
 * Completion callback on the bio's. Called as bio->bi_end_io().
 */

static void __end_block_io_op(struct pending_req *pending_req, int error)
{
        /* An error fails the entire request. */
        if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
            (error == -EOPNOTSUPP)) {
                pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
                xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
                   (error == -EOPNOTSUPP)) {
                pr_debug(DRV_PFX "write barrier op failed, not supported\n");
                xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
                         " error=%d\n", error);
                pending_req->status = BLKIF_RSP_ERROR;
        }

        /*
         * If all of the bio's have completed it is time to unmap
         * the grant references associated with 'request' and provide
         * the proper response on the ring.
         */
        if (atomic_dec_and_test(&pending_req->pendcnt)) {
                xen_blkbk_unmap(pending_req);
                make_response(pending_req->blkif, pending_req->id,
                              pending_req->operation, pending_req->status);
                xen_blkif_put(pending_req->blkif);
                if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
                        if (atomic_read(&pending_req->blkif->drain))
                                complete(&pending_req->blkif->drain_complete);
                }
                free_req(pending_req);
        }
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
        __end_block_io_op(bio->bi_private, error);
        bio_put(bio);
}

/*
 * Function to copy from the ring buffer the 'struct blkif_request'
 * (which has the sectors we want, number of them, grant references, etc),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif *blkif)
{
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        struct blkif_request req;
        struct pending_req *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;

        rc = blk_rings->common.req_cons;
        rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */

        while (rc != rp) {

                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
                        break;

                if (kthread_should_stop()) {
                        more_to_do = 1;
                        break;
                }

                pending_req = alloc_req();
                if (NULL == pending_req) {
                        blkif->st_oo_req++;
                        more_to_do = 1;
                        break;
                }

                switch (blkif->blk_protocol) {
                case BLKIF_PROTOCOL_NATIVE:
                        memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
                        break;
                case BLKIF_PROTOCOL_X86_32:
                        blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
                        break;
                case BLKIF_PROTOCOL_X86_64:
                        blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
                        break;
                default:
                        BUG();
                }
                blk_rings->common.req_cons = ++rc; /* before make_response() */

                /* Apply all sanity checks to /private copy/ of request. */
                barrier();

                if (dispatch_rw_block_io(blkif, &req, pending_req))
                        break;

                /* Yield point for this unbounded loop. */
                cond_resched();
        }

        return more_to_do;
}

static int
do_block_io_op(struct xen_blkif *blkif)
{
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        int more_to_do;

        do {
                more_to_do = __do_block_io_op(blkif);
                if (more_to_do)
                        break;

                RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
        } while (more_to_do);

        return more_to_do;
}
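
/*
 * Note (added commentary): RING_FINAL_CHECK_FOR_REQUESTS() re-arms the
 * frontend's notification (by advancing req_event) and then re-checks the
 * producer index, so a request that slips in after the last scan either
 * re-enters the loop here or raises a fresh event-channel interrupt; it is
 * never dropped.
 */
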
/*
 * Transmutation of the 'struct blkif_request' to a proper 'struct bio',
 * followed by a call to 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif *blkif,
                                struct blkif_request *req,
                                struct pending_req *pending_req)
{
        struct phys_req preq;
        struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int nseg;
        struct bio *bio = NULL;
        struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        int i, nbio = 0;
        int operation;
        struct blk_plug plug;
        bool drain = false;

        switch (req->operation) {
        case BLKIF_OP_READ:
                blkif->st_rd_req++;
                operation = READ;
                break;
        case BLKIF_OP_WRITE:
                blkif->st_wr_req++;
                operation = WRITE_ODIRECT;
                break;
        case BLKIF_OP_WRITE_BARRIER:
                drain = true;
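                /* fall through: a write barrier also needs the cache flush below */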
        case BLKIF_OP_FLUSH_DISKCACHE:
                blkif->st_f_req++;
                operation = WRITE_FLUSH;
                break;
        case BLKIF_OP_DISCARD:
                blkif->st_ds_req++;
                operation = REQ_DISCARD;
                break;
        default:
                operation = 0; /* make gcc happy */
                goto fail_response;
                break;
        }

        if (unlikely(operation == REQ_DISCARD))
                nseg = 0;
        else
                /* Check that the number of segments is sane. */
                nseg = req->u.rw.nr_segments;

        if (unlikely(nseg == 0 && operation != WRITE_FLUSH &&
                     operation != REQ_DISCARD) ||
            unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
                pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
                         nseg);
                /* Haven't submitted any bio's yet. */
                goto fail_response;
        }

        preq.dev           = req->u.rw.handle;
        preq.sector_number = req->u.rw.sector_number;
        preq.nr_sects      = 0;

        pending_req->blkif     = blkif;
        pending_req->id        = req->u.rw.id;
        pending_req->operation = req->operation;
        pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_pages  = nseg;

        for (i = 0; i < nseg; i++) {
                seg[i].nsec = req->u.rw.seg[i].last_sect -
                        req->u.rw.seg[i].first_sect + 1;
                if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
                    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
                        goto fail_response;
                preq.nr_sects += seg[i].nsec;
        }

        if (xen_vbd_translate(&preq, blkif, operation) != 0) {
                pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
                         operation == READ ? "read" : "write",
                         preq.sector_number,
                         preq.sector_number + preq.nr_sects, preq.dev);
                goto fail_response;
        }

        /*
         * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
         * is set there.
         */
        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
                    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
                        pr_debug(DRV_PFX "Misaligned I/O request from domain %d",
                                 blkif->domid);
                        goto fail_response;
                }
        }

        /*
         * Wait on all outstanding I/O's and once that has been completed
         * issue the WRITE_FLUSH.
         */
        if (drain)
                xen_blk_drain_io(pending_req->blkif);

        /*
         * If we have failed at this point, we need to undo the M2P override,
         * set gnttab_set_unmap_op on all of the grant references and perform
         * the hypercall to unmap the grants - that is all done in
         * xen_blkbk_unmap.
         */
        if (nseg && xen_blkbk_map(req, pending_req, seg))
                goto fail_flush;

        /*
         * This corresponding xen_blkif_put is done in __end_block_io_op, or
         * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
         */
        xen_blkif_get(blkif);

        for (i = 0; i < nseg; i++) {
                while ((bio == NULL) ||
                       (bio_add_page(bio,
                                     blkbk->pending_page(pending_req, i),
                                     seg[i].nsec << 9,
                                     seg[i].buf & ~PAGE_MASK) == 0)) {

                        bio = bio_alloc(GFP_KERNEL, nseg-i);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;

                        biolist[nbio++] = bio;
                        bio->bi_bdev    = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                        bio->bi_sector  = preq.sector_number;
                }

                preq.sector_number += seg[i].nsec;
        }

        /* This will be hit if the operation was a flush or discard. */
        if (!bio) {
                BUG_ON(operation != WRITE_FLUSH && operation != REQ_DISCARD);

                if (operation == WRITE_FLUSH) {
                        bio = bio_alloc(GFP_KERNEL, 0);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;

                        biolist[nbio++] = bio;
                        bio->bi_bdev    = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                } else if (operation == REQ_DISCARD) {
                        xen_blk_discard(blkif, req);
                        xen_blkif_put(blkif);
                        free_req(pending_req);
                        return 0;
                }
        }

        /*
         * Set the pending count once, up front, so that the last submit_bio
         * does not have to call atomic_inc.
         */
        atomic_set(&pending_req->pendcnt, nbio);

        /* Get a reference count for the disk queue and start sending I/O */
        blk_start_plug(&plug);

        for (i = 0; i < nbio; i++)
                submit_bio(operation, biolist[i]);

        /* Let the I/Os go.. */
        blk_finish_plug(&plug);

        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
        else if (operation & WRITE)
                blkif->st_wr_sect += preq.nr_sects;

        return 0;

 fail_flush:
        xen_blkbk_unmap(pending_req);
 fail_response:
        /* Haven't submitted any bio's yet. */
        make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR);
        free_req(pending_req);
        msleep(1); /* back off a bit */
        return -EIO;

 fail_put_bio:
        for (i = 0; i < nbio; i++)
                bio_put(biolist[i]);
        __end_block_io_op(pending_req, -EINVAL);
        msleep(1); /* back off a bit */
        return -EIO;
}

/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
                          unsigned short op, int st)
{
        struct blkif_response resp;
        unsigned long flags;
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        int notify;

        resp.id        = id;
        resp.operation = op;
        resp.status    = st;

        spin_lock_irqsave(&blkif->blk_ring_lock, flags);
        /* Place on the response ring for the relevant domain. */
        switch (blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
                memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_32:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_64:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        default:
                BUG();
        }
        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
        if (notify)
                notify_remote_via_irq(blkif->irq);
}

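/*
 * Note (added commentary): RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() only sets
 * 'notify' when the frontend could actually be waiting (judged from its
 * rsp_event index), so the event channel is kicked once per batch of
 * responses rather than once per response.
 */
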
static int __init xen_blkif_init(void)
{
        int i, mmap_pages;
        int rc = 0;

        if (!xen_pv_domain())
                return -ENODEV;

        blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
        if (!blkbk) {
                pr_alert(DRV_PFX "%s: out of memory!\n", __func__);
                return -ENOMEM;
        }

        mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

        blkbk->pending_reqs          = kzalloc(sizeof(blkbk->pending_reqs[0]) *
                                        xen_blkif_reqs, GFP_KERNEL);
        blkbk->pending_grant_handles = kmalloc(sizeof(blkbk->pending_grant_handles[0]) *
                                        mmap_pages, GFP_KERNEL);
        blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
                                        mmap_pages, GFP_KERNEL);

        if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
            !blkbk->pending_pages) {
                rc = -ENOMEM;
                goto out_of_memory;
        }

        for (i = 0; i < mmap_pages; i++) {
                blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
                blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
                if (blkbk->pending_pages[i] == NULL) {
                        rc = -ENOMEM;
                        goto out_of_memory;
                }
        }
        rc = xen_blkif_interface_init();
        if (rc)
                goto failed_init;

        INIT_LIST_HEAD(&blkbk->pending_free);
        spin_lock_init(&blkbk->pending_free_lock);
        init_waitqueue_head(&blkbk->pending_free_wq);

        for (i = 0; i < xen_blkif_reqs; i++)
                list_add_tail(&blkbk->pending_reqs[i].free_list,
                              &blkbk->pending_free);

        rc = xen_blkif_xenbus_init();
        if (rc)
                goto failed_init;

        return 0;

 out_of_memory:
        pr_alert(DRV_PFX "%s: out of memory\n", __func__);
 failed_init:
        kfree(blkbk->pending_reqs);
        kfree(blkbk->pending_grant_handles);
        if (blkbk->pending_pages) {
                for (i = 0; i < mmap_pages; i++) {
                        if (blkbk->pending_pages[i])
                                __free_page(blkbk->pending_pages[i]);
                }
                kfree(blkbk->pending_pages);
        }
        kfree(blkbk);
        blkbk = NULL;
        return rc;
}

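/*
 * Worked example (added commentary): with the default xen_blkif_reqs of 64,
 * mmap_pages = 64 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) = 704 preallocated
 * pages; at 4 KiB per page that is 2816 KiB of grant-mapping space shared by
 * every backend instance in this domain.
 */
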
module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");