xen-block: implement indirect descriptors
drivers/block/xen-blkback/blkback.c
/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a value too low will reduce memory used in each backend,
 * but can have a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it
 * can be set to a lower value that might degrade performance on some
 * intensive IO workloads.
 */

static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
                 "Maximum number of free pages to keep in each block backend buffer");

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */

static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
                 "Maximum number of grants to map persistently");

/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive
 * executions of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. This is the percentage of grants to be removed at each
 * LRU execution.
 */
#define LRU_PERCENT_CLEAN 5

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to free_xenballooned_pages */
#define NUM_BATCH_FREE_PAGES 10

static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
{
        unsigned long flags;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        if (list_empty(&blkif->free_pages)) {
                BUG_ON(blkif->free_pages_num != 0);
                spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
                return alloc_xenballooned_pages(1, page, false);
        }
        BUG_ON(blkif->free_pages_num == 0);
        page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
        list_del(&page[0]->lru);
        blkif->free_pages_num--;
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);

        return 0;
}

static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
                                  int num)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        for (i = 0; i < num; i++)
                list_add(&page[i]->lru, &blkif->free_pages);
        blkif->free_pages_num += num;
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
}

static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
{
        /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
        struct page *page[NUM_BATCH_FREE_PAGES];
        unsigned int num_pages = 0;
        unsigned long flags;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        while (blkif->free_pages_num > num) {
                BUG_ON(list_empty(&blkif->free_pages));
                page[num_pages] = list_first_entry(&blkif->free_pages,
                                                   struct page, lru);
                list_del(&page[num_pages]->lru);
                blkif->free_pages_num--;
                if (++num_pages == NUM_BATCH_FREE_PAGES) {
                        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
                        free_xenballooned_pages(num_pages, page);
                        spin_lock_irqsave(&blkif->free_pages_lock, flags);
                        num_pages = 0;
                }
        }
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
        if (num_pages != 0)
                free_xenballooned_pages(num_pages, page);
}

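/*
 * Illustrative sketch (not part of the original file; the example function
 * is hypothetical, only get_free_page()/put_free_pages() are real): typical
 * use of the free-page pool above by the mapping code further down.
 */
#if 0
static int example_borrow_pages(struct xen_blkif *blkif)
{
        struct page *pages[2];
        int i;

        /* Borrow two pages: reuses pooled pages, ballooning in new ones
         * only when the pool is empty. */
        for (i = 0; i < 2; i++)
                if (get_free_page(blkif, &pages[i]))
                        goto fail;
        /* ... map grants into the pages and perform the I/O ... */

        /* Return them to the pool instead of freeing; xen_blkif_schedule()
         * later trims the pool back to xen_blkif_max_buffer_pages. */
        put_free_pages(blkif, pages, 2);
        return 0;
fail:
        put_free_pages(blkif, pages, i);
        return -ENOMEM;
}
#endif
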
#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
                                struct blkif_request *req,
                                struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
                          unsigned short op, int st);

#define foreach_grant_safe(pos, n, rbtree, node) \
        for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
             &(pos)->node != NULL; \
             (pos) = container_of(n, typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)

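/*
 * Illustrative sketch (hypothetical, not compiled in): foreach_grant_safe
 * allows the current node to be erased from the rbtree, because the next
 * node is fetched before the loop body runs.  A real teardown would also
 * unmap the grant handles first, as free_persistent_gnts() below does.
 */
#if 0
static void example_drop_all(struct rb_root *root)
{
        struct persistent_gnt *gnt;
        struct rb_node *n;

        foreach_grant_safe(gnt, n, root, node) {
                rb_erase(&gnt->node, root); /* safe: 'n' already points past us */
                kfree(gnt);
        }
}
#endif
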
/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif *blkif,
                              struct persistent_gnt *persistent_gnt)
{
        struct rb_node **new = NULL, *parent = NULL;
        struct persistent_gnt *this;

        if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
                if (!blkif->vbd.overflow_max_grants)
                        blkif->vbd.overflow_max_grants = 1;
                return -EBUSY;
        }
        /* Figure out where to put the new node. */
        new = &blkif->persistent_gnts.rb_node;
        while (*new) {
                this = container_of(*new, struct persistent_gnt, node);

                parent = *new;
                if (persistent_gnt->gnt < this->gnt)
                        new = &((*new)->rb_left);
                else if (persistent_gnt->gnt > this->gnt)
                        new = &((*new)->rb_right);
                else {
                        pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
                        return -EINVAL;
                }
        }

        bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
        set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
        /* Add new node and rebalance tree. */
        rb_link_node(&(persistent_gnt->node), parent, new);
        rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
        blkif->persistent_gnt_c++;
        atomic_inc(&blkif->persistent_gnt_in_use);
        return 0;
}

static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
                                                 grant_ref_t gref)
{
        struct persistent_gnt *data;
        struct rb_node *node = NULL;

        node = blkif->persistent_gnts.rb_node;
        while (node) {
                data = container_of(node, struct persistent_gnt, node);

                if (gref < data->gnt)
                        node = node->rb_left;
                else if (gref > data->gnt)
                        node = node->rb_right;
                else {
                        if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
                                pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
                                return NULL;
                        }
                        set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
                        atomic_inc(&blkif->persistent_gnt_in_use);
                        return data;
                }
        }
        return NULL;
}

static void put_persistent_gnt(struct xen_blkif *blkif,
                               struct persistent_gnt *persistent_gnt)
{
        if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
                pr_alert_ratelimited(DRV_PFX " freeing a grant already unused\n");
        set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
        clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
        atomic_dec(&blkif->persistent_gnt_in_use);
}

static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
                                 unsigned int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        int ret = 0;
        int segs_to_unmap = 0;

        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                       BLKBACK_INVALID_HANDLE);
                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                                    (unsigned long) pfn_to_kaddr(page_to_pfn(
                                            persistent_gnt->page)),
                                    GNTMAP_host_map,
                                    persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
                    !rb_next(&persistent_gnt->node)) {
                        ret = gnttab_unmap_refs(unmap, NULL, pages,
                                                segs_to_unmap);
                        BUG_ON(ret);
                        put_free_pages(blkif, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }

                rb_erase(&persistent_gnt->node, root);
                kfree(persistent_gnt);
                num--;
        }
        BUG_ON(num != 0);
}

static void unmap_purged_grants(struct work_struct *work)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        int ret, segs_to_unmap = 0;
        struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);

        while (!list_empty(&blkif->persistent_purge_list)) {
                persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
                                                  struct persistent_gnt,
                                                  remove_node);
                list_del(&persistent_gnt->remove_node);

                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                                    vaddr(persistent_gnt->page),
                                    GNTMAP_host_map,
                                    persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
                        ret = gnttab_unmap_refs(unmap, NULL, pages,
                                                segs_to_unmap);
                        BUG_ON(ret);
                        put_free_pages(blkif, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }
                kfree(persistent_gnt);
        }
        if (segs_to_unmap > 0) {
                ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
                BUG_ON(ret);
                put_free_pages(blkif, pages, segs_to_unmap);
        }
}

static void purge_persistent_gnt(struct xen_blkif *blkif)
{
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        unsigned int num_clean, total;
        bool scan_used = false;
        struct rb_root *root;

        if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
            (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
            !blkif->vbd.overflow_max_grants)) {
                return;
        }

        if (work_pending(&blkif->persistent_purge_work)) {
                pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
                return;
        }

        num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
        num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
        num_clean = min(blkif->persistent_gnt_c, num_clean);
        if (num_clean >
            (blkif->persistent_gnt_c -
            atomic_read(&blkif->persistent_gnt_in_use)))
                return;

        /*
         * At this point we can be sure that there will be no calls
         * to get_persistent_gnt (because we are executing this code from
         * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
         * which means that the number of currently used grants will go down,
         * but never up, so we will always be able to remove the requested
         * number of grants.
         */

        total = num_clean;

        pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);

        INIT_LIST_HEAD(&blkif->persistent_purge_list);
        root = &blkif->persistent_gnts;
purge_list:
        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                       BLKBACK_INVALID_HANDLE);

                if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
                        continue;
                if (!scan_used &&
                    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
                        continue;

                rb_erase(&persistent_gnt->node, root);
                list_add(&persistent_gnt->remove_node,
                         &blkif->persistent_purge_list);
                if (--num_clean == 0)
                        goto finished;
        }
        /*
         * If we get here it means we also need to start cleaning
         * grants that were used since the last purge in order to cope
         * with the requested number.
         */
        if (!scan_used) {
                pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
                scan_used = true;
                goto purge_list;
        }
finished:
        /* Remove the "used" flag from all the persistent grants */
        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                       BLKBACK_INVALID_HANDLE);
                clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
        }
        blkif->persistent_gnt_c -= (total - num_clean);
        blkif->vbd.overflow_max_grants = 0;

        /* We can defer this work */
        INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
        schedule_work(&blkif->persistent_purge_work);
        pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
        return;
}

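/*
 * Worked example of the purge sizing above (hypothetical numbers): with
 * xen_blkif_max_pgrants = 1056 and LRU_PERCENT_CLEAN = 5, the base trim
 * is (1056 / 100) * 5 = 50 grants.  If persistent_gnt_c has grown to
 * 1060, num_clean = 1060 - 1056 + 50 = 54, so a successful purge leaves
 * the tree a little below the limit rather than exactly at it.
 */
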
/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(struct xen_blkif *blkif)
{
        struct pending_req *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&blkif->pending_free_lock, flags);
        if (!list_empty(&blkif->pending_free)) {
                req = list_entry(blkif->pending_free.next, struct pending_req,
                                 free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
        return req;
}

/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free pending_req.
 */
static void free_req(struct xen_blkif *blkif, struct pending_req *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&blkif->pending_free_lock, flags);
        was_empty = list_empty(&blkif->pending_free);
        list_add(&req->free_list, &blkif->pending_free);
        spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
        if (was_empty)
                wake_up(&blkif->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
                             int operation)
{
        struct xen_vbd *vbd = &blkif->vbd;
        int rc = -EACCES;

        if ((operation != READ) && vbd->readonly)
                goto out;

        if (likely(req->nr_sects)) {
                blkif_sector_t end = req->sector_number + req->nr_sects;

                if (unlikely(end < req->sector_number))
                        goto out;
                if (unlikely(end > vbd_sz(vbd)))
                        goto out;
        }

        req->dev  = vbd->pdevice;
        req->bdev = vbd->bdev;
        rc = 0;

 out:
        return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
        struct xen_vbd *vbd = &blkif->vbd;
        struct xenbus_transaction xbt;
        int err;
        struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
        unsigned long long new_size = vbd_sz(vbd);

        pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
                blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
        pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
        vbd->size = new_size;
again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                pr_warn(DRV_PFX "Error starting transaction\n");
                return;
        }
        err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
                            (unsigned long long)vbd_sz(vbd));
        if (err) {
                pr_warn(DRV_PFX "Error writing new size\n");
                goto abort;
        }
        /*
         * Write the current state; we will use this to synchronize
         * the front-end. If the current state is "connected" the
         * front-end will get the new size information online.
         */
        err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
        if (err) {
                pr_warn(DRV_PFX "Error writing the state\n");
                goto abort;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
                goto again;
        if (err)
                pr_warn(DRV_PFX "Error ending transaction\n");
        return;
abort:
        xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
        blkif->waiting_reqs = 1;
        wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif *blkif)
{
        pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
                " | ds %4llu | pg: %4u/%4d\n",
                current->comm, blkif->st_oo_req,
                blkif->st_rd_req, blkif->st_wr_req,
                blkif->st_f_req, blkif->st_ds_req,
                blkif->persistent_gnt_c,
                xen_blkif_max_pgrants);
        blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        blkif->st_rd_req = 0;
        blkif->st_wr_req = 0;
        blkif->st_oo_req = 0;
        blkif->st_ds_req = 0;
}

int xen_blkif_schedule(void *arg)
{
        struct xen_blkif *blkif = arg;
        struct xen_vbd *vbd = &blkif->vbd;
        unsigned long timeout;

        xen_blkif_get(blkif);

        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;
                if (unlikely(vbd->size != vbd_sz(vbd)))
                        xen_vbd_resize(blkif);

                timeout = msecs_to_jiffies(LRU_INTERVAL);

                timeout = wait_event_interruptible_timeout(
                        blkif->wq,
                        blkif->waiting_reqs || kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;
                timeout = wait_event_interruptible_timeout(
                        blkif->pending_free_wq,
                        !list_empty(&blkif->pending_free) ||
                        kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;

                blkif->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                if (do_block_io_op(blkif))
                        blkif->waiting_reqs = 1;

purge_gnt_list:
                if (blkif->vbd.feature_gnt_persistent &&
                    time_after(jiffies, blkif->next_lru)) {
                        purge_persistent_gnt(blkif);
                        blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
                }

                /* Shrink if we have more than xen_blkif_max_buffer_pages */
                shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);

                if (log_stats && time_after(jiffies, blkif->st_print))
                        print_stats(blkif);
        }

        /* Since we are shutting down remove all pages from the buffer */
        shrink_free_pagepool(blkif, 0 /* All */);

        /* Free all persistent grant pages */
        if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
                free_persistent_gnts(blkif, &blkif->persistent_gnts,
                                     blkif->persistent_gnt_c);

        BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
        blkif->persistent_gnt_c = 0;

        if (log_stats)
                print_stats(blkif);

        blkif->xenblkd = NULL;
        xen_blkif_put(blkif);

        return 0;
}

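/*
 * Illustrative note (not from the original source): the two
 * wait_event_interruptible_timeout() calls above share one timeout
 * budget, initialised to LRU_INTERVAL (100 ms).  The first wait returns
 * the jiffies left, which seeds the second wait, so whether the thread
 * is woken for ring work, for a freed request, or not at all, it reaches
 * the purge_gnt_list label roughly every LRU_INTERVAL ms and gives the
 * persistent-grant LRU a chance to run.
 */
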
/*
 * Unmap the grant references, and also remove the M2P over-rides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct xen_blkif *blkif,
                            grant_handle_t handles[],
                            struct page *pages[],
                            struct persistent_gnt *persistent_gnts[],
                            int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int i, invcount = 0;
        int ret;

        for (i = 0; i < num; i++) {
                if (persistent_gnts[i] != NULL) {
                        put_persistent_gnt(blkif, persistent_gnts[i]);
                        continue;
                }
                if (handles[i] == BLKBACK_INVALID_HANDLE)
                        continue;
                unmap_pages[invcount] = pages[i];
                gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]),
                                    GNTMAP_host_map, handles[i]);
                handles[i] = BLKBACK_INVALID_HANDLE;
                if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
                        ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
                                                invcount);
                        BUG_ON(ret);
                        put_free_pages(blkif, unmap_pages, invcount);
                        invcount = 0;
                }
        }
        if (invcount) {
                ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
                BUG_ON(ret);
                put_free_pages(blkif, unmap_pages, invcount);
        }
}

static int xen_blkbk_map(struct xen_blkif *blkif, grant_ref_t grefs[],
                         struct persistent_gnt *persistent_gnts[],
                         grant_handle_t handles[],
                         struct page *pages[],
                         int num, bool ro)
{
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt = NULL;
        phys_addr_t addr = 0;
        int i, seg_idx, new_map_idx;
        int segs_to_map = 0;
        int ret = 0;
        int last_map = 0, map_until = 0;
        int use_persistent_gnts;

        use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

        /*
         * Fill out map[..] with the PFN of the page in our domain and the
         * corresponding grant reference for each page that is not already
         * persistently mapped.
         */
again:
        for (i = map_until; i < num; i++) {
                uint32_t flags;

                if (use_persistent_gnts)
                        persistent_gnt = get_persistent_gnt(
                                blkif,
                                grefs[i]);

                if (persistent_gnt) {
                        /*
                         * We are using persistent grants and
                         * the grant is already mapped.
                         */
                        pages[i] = persistent_gnt->page;
                        persistent_gnts[i] = persistent_gnt;
                } else {
                        if (get_free_page(blkif, &pages[i]))
                                goto out_of_memory;
                        addr = vaddr(pages[i]);
                        pages_to_gnt[segs_to_map] = pages[i];
                        persistent_gnts[i] = NULL;
                        flags = GNTMAP_host_map;
                        if (!use_persistent_gnts && ro)
                                flags |= GNTMAP_readonly;
                        gnttab_set_map_op(&map[segs_to_map++], addr,
                                          flags, grefs[i],
                                          blkif->domid);
                }
                map_until = i + 1;
                if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
                        break;
        }

        if (segs_to_map) {
                ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
                BUG_ON(ret);
        }

        /*
         * Now swizzle the MFN in our domain with the MFN from the other domain
         * so that when we access vaddr(pending_req, i) it has the contents of
         * the page from the other domain.
         */
        for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
                if (!persistent_gnts[seg_idx]) {
                        /* This is a newly mapped grant. */
                        BUG_ON(new_map_idx >= segs_to_map);
                        if (unlikely(map[new_map_idx].status != 0)) {
                                pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
                                handles[seg_idx] = BLKBACK_INVALID_HANDLE;
                                ret |= 1;
                                goto next;
                        }
                        handles[seg_idx] = map[new_map_idx].handle;
                } else {
                        continue;
                }
                if (use_persistent_gnts &&
                    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
                        /*
                         * We are using persistent grants, the grant is
                         * not mapped but we might have room for it.
                         */
                        persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
                                                 GFP_KERNEL);
                        if (!persistent_gnt) {
                                /*
                                 * If we don't have enough memory to
                                 * allocate the persistent_gnt struct
                                 * map this grant non-persistently.
                                 */
                                goto next;
                        }
                        persistent_gnt->gnt = map[new_map_idx].ref;
                        persistent_gnt->handle = map[new_map_idx].handle;
                        persistent_gnt->page = pages[seg_idx];
                        if (add_persistent_gnt(blkif,
                                               persistent_gnt)) {
                                kfree(persistent_gnt);
                                persistent_gnt = NULL;
                                goto next;
                        }
                        persistent_gnts[seg_idx] = persistent_gnt;
                        pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
                                 persistent_gnt->gnt, blkif->persistent_gnt_c,
                                 xen_blkif_max_pgrants);
                        goto next;
                }
                if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
                        blkif->vbd.overflow_max_grants = 1;
                        pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
                                 blkif->domid, blkif->vbd.handle);
                }
                /*
                 * We could not map this grant persistently, so use it as
                 * a non-persistent grant.
                 */
next:
                new_map_idx++;
        }
        segs_to_map = 0;
        last_map = map_until;
        if (map_until != num)
                goto again;

        return ret;

out_of_memory:
        pr_alert(DRV_PFX "%s: out of memory\n", __func__);
        put_free_pages(blkif, pages_to_gnt, segs_to_map);
        return -ENOMEM;
}

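/*
 * Illustrative note (not from the original source): the on-stack map[]
 * and pages_to_gnt[] arrays above only hold BLKIF_MAX_SEGMENTS_PER_REQUEST
 * entries, while an indirect request can carry far more segments, so the
 * again/last_map/map_until logic maps the grants in chunks of that size.
 * E.g., assuming the usual value of 11 for BLKIF_MAX_SEGMENTS_PER_REQUEST,
 * a hypothetical num = 25 takes three passes covering segments [0,11),
 * [11,22) and [22,25).
 */
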
static int xen_blkbk_map_seg(struct pending_req *pending_req,
                             struct seg_buf seg[],
                             struct page *pages[])
{
        int rc;

        rc = xen_blkbk_map(pending_req->blkif, pending_req->grefs,
                           pending_req->persistent_gnts,
                           pending_req->grant_handles, pending_req->pages,
                           pending_req->nr_pages,
                           (pending_req->operation != BLKIF_OP_READ));

        return rc;
}

static int xen_blkbk_parse_indirect(struct blkif_request *req,
                                    struct pending_req *pending_req,
                                    struct seg_buf seg[],
                                    struct phys_req *preq)
{
        struct persistent_gnt **persistent =
                pending_req->indirect_persistent_gnts;
        struct page **pages = pending_req->indirect_pages;
        struct xen_blkif *blkif = pending_req->blkif;
        int indirect_grefs, rc, n, nseg, i;
        struct blkif_request_segment_aligned *segments = NULL;

        nseg = pending_req->nr_pages;
        indirect_grefs = INDIRECT_PAGES(nseg);
        BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

        rc = xen_blkbk_map(blkif, req->u.indirect.indirect_grefs,
                           persistent, pending_req->indirect_handles,
                           pages, indirect_grefs, true);
        if (rc)
                goto unmap;

        for (n = 0, i = 0; n < nseg; n++) {
                if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
                        /* Map indirect segments */
                        if (segments)
                                kunmap_atomic(segments);
                        segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]);
                }
                i = n % SEGS_PER_INDIRECT_FRAME;
                pending_req->grefs[n] = segments[i].gref;
                seg[n].nsec = segments[i].last_sect -
                        segments[i].first_sect + 1;
                seg[n].offset = (segments[i].first_sect << 9);
                if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) ||
                    (segments[i].last_sect < segments[i].first_sect)) {
                        rc = -EINVAL;
                        goto unmap;
                }
                preq->nr_sects += seg[n].nsec;
        }

unmap:
        if (segments)
                kunmap_atomic(segments);
        xen_blkbk_unmap(blkif, pending_req->indirect_handles,
                        pages, persistent, indirect_grefs);
        return rc;
}

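/*
 * Illustrative sketch of the indirect layout parsed above (assuming 4 KiB
 * pages and the 8-byte struct blkif_request_segment_aligned): each
 * indirect page holds SEGS_PER_INDIRECT_FRAME = 4096 / 8 = 512 segment
 * entries, so a hypothetical request with nseg = 600 would need
 * INDIRECT_PAGES(600) = 2 indirect grant pages.  Segment n is found at
 * index n % SEGS_PER_INDIRECT_FRAME inside indirect page
 * n / SEGS_PER_INDIRECT_FRAME, which is exactly the arithmetic the loop
 * uses while it kmaps one indirect page at a time.
 */
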
static int dispatch_discard_io(struct xen_blkif *blkif,
                               struct blkif_request *req)
{
        int err = 0;
        int status = BLKIF_RSP_OKAY;
        struct block_device *bdev = blkif->vbd.bdev;
        unsigned long secure;

        blkif->st_ds_req++;

        xen_blkif_get(blkif);
        secure = (blkif->vbd.discard_secure &&
                  (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
                 BLKDEV_DISCARD_SECURE : 0;

        err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
                                   req->u.discard.nr_sectors,
                                   GFP_KERNEL, secure);

        if (err == -EOPNOTSUPP) {
                pr_debug(DRV_PFX "discard op failed, not supported\n");
                status = BLKIF_RSP_EOPNOTSUPP;
        } else if (err)
                status = BLKIF_RSP_ERROR;

        make_response(blkif, req->u.discard.id, req->operation, status);
        xen_blkif_put(blkif);
        return err;
}

static int dispatch_other_io(struct xen_blkif *blkif,
                             struct blkif_request *req,
                             struct pending_req *pending_req)
{
        free_req(blkif, pending_req);
        make_response(blkif, req->u.other.id, req->operation,
                      BLKIF_RSP_EOPNOTSUPP);
        return -EIO;
}

static void xen_blk_drain_io(struct xen_blkif *blkif)
{
        atomic_set(&blkif->drain, 1);
        do {
                /*
                 * The initial value is one, and one refcnt taken at the
                 * start of the xen_blkif_schedule thread.
                 */
                if (atomic_read(&blkif->refcnt) <= 2)
                        break;
                wait_for_completion_interruptible_timeout(
                                &blkif->drain_complete, HZ);

                if (!atomic_read(&blkif->drain))
                        break;
        } while (!kthread_should_stop());
        atomic_set(&blkif->drain, 0);
}

/*
 * Completion callback on the bios. Called as bio->bi_end_io() via
 * end_block_io_op() below.
 */
static void __end_block_io_op(struct pending_req *pending_req, int error)
{
        /* An error fails the entire request. */
        if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
            (error == -EOPNOTSUPP)) {
                pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
                xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
                   (error == -EOPNOTSUPP)) {
                pr_debug(DRV_PFX "write barrier op failed, not supported\n");
                xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
                         " error=%d\n", error);
                pending_req->status = BLKIF_RSP_ERROR;
        }

        /*
         * If all of the bios have completed it is time to unmap
         * the grant references associated with 'request' and provide
         * the proper response on the ring.
         */
        if (atomic_dec_and_test(&pending_req->pendcnt)) {
                xen_blkbk_unmap(pending_req->blkif, pending_req->grant_handles,
                                pending_req->pages,
                                pending_req->persistent_gnts,
                                pending_req->nr_pages);
                make_response(pending_req->blkif, pending_req->id,
                              pending_req->operation, pending_req->status);
                xen_blkif_put(pending_req->blkif);
                if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
                        if (atomic_read(&pending_req->blkif->drain))
                                complete(&pending_req->blkif->drain_complete);
                }
                free_req(pending_req->blkif, pending_req);
        }
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
        __end_block_io_op(bio->bi_private, error);
        bio_put(bio);
}

/*
 * Function to copy from the ring buffer the 'struct blkif_request'
 * (which has the sectors we want, number of them, grant references, etc.),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif *blkif)
{
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        struct blkif_request req;
        struct pending_req *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;

        rc = blk_rings->common.req_cons;
        rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */

        while (rc != rp) {

                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
                        break;

                if (kthread_should_stop()) {
                        more_to_do = 1;
                        break;
                }

                pending_req = alloc_req(blkif);
                if (NULL == pending_req) {
                        blkif->st_oo_req++;
                        more_to_do = 1;
                        break;
                }

                switch (blkif->blk_protocol) {
                case BLKIF_PROTOCOL_NATIVE:
                        memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
                        break;
                case BLKIF_PROTOCOL_X86_32:
                        blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
                        break;
                case BLKIF_PROTOCOL_X86_64:
                        blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
                        break;
                default:
                        BUG();
                }
                blk_rings->common.req_cons = ++rc; /* before make_response() */

                /* Apply all sanity checks to /private copy/ of request. */
                barrier();

                switch (req.operation) {
                case BLKIF_OP_READ:
                case BLKIF_OP_WRITE:
                case BLKIF_OP_WRITE_BARRIER:
                case BLKIF_OP_FLUSH_DISKCACHE:
                case BLKIF_OP_INDIRECT:
                        if (dispatch_rw_block_io(blkif, &req, pending_req))
                                goto done;
                        break;
                case BLKIF_OP_DISCARD:
                        free_req(blkif, pending_req);
                        if (dispatch_discard_io(blkif, &req))
                                goto done;
                        break;
                default:
                        if (dispatch_other_io(blkif, &req, pending_req))
                                goto done;
                        break;
                }

                /* Yield point for this unbounded loop. */
                cond_resched();
        }
done:
        return more_to_do;
}

static int
do_block_io_op(struct xen_blkif *blkif)
{
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        int more_to_do;

        do {
                more_to_do = __do_block_io_op(blkif);
                if (more_to_do)
                        break;

                RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
        } while (more_to_do);

        return more_to_do;
}

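/*
 * Illustrative note (not from the original source): do_block_io_op()
 * implements the standard Xen ring "final check" idiom.
 * RING_FINAL_CHECK_FOR_REQUESTS re-reads req_prod after re-enabling
 * notifications, closing the race where the frontend queues a request
 * just as the backend goes back to sleep.
 */
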
/*
 * Transmute the 'struct blkif_request' to a proper 'struct bio'
 * and call 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif *blkif,
                                struct blkif_request *req,
                                struct pending_req *pending_req)
{
        struct phys_req preq;
        struct seg_buf *seg = pending_req->seg;
        unsigned int nseg;
        struct bio *bio = NULL;
        struct bio **biolist = pending_req->biolist;
        int i, nbio = 0;
        int operation;
        struct blk_plug plug;
        bool drain = false;
        struct page **pages = pending_req->pages;
        unsigned short req_operation;

        req_operation = req->operation == BLKIF_OP_INDIRECT ?
                        req->u.indirect.indirect_op : req->operation;
        if ((req->operation == BLKIF_OP_INDIRECT) &&
            (req_operation != BLKIF_OP_READ) &&
            (req_operation != BLKIF_OP_WRITE)) {
                pr_debug(DRV_PFX "Invalid indirect operation (%u)\n",
                         req_operation);
                goto fail_response;
        }

        switch (req_operation) {
        case BLKIF_OP_READ:
                blkif->st_rd_req++;
                operation = READ;
                break;
        case BLKIF_OP_WRITE:
                blkif->st_wr_req++;
                operation = WRITE_ODIRECT;
                break;
        case BLKIF_OP_WRITE_BARRIER:
                drain = true;
        case BLKIF_OP_FLUSH_DISKCACHE:
                blkif->st_f_req++;
                operation = WRITE_FLUSH;
                break;
        default:
                operation = 0; /* make gcc happy */
                goto fail_response;
                break;
        }

        /* Check that the number of segments is sane. */
        nseg = req->operation == BLKIF_OP_INDIRECT ?
               req->u.indirect.nr_segments : req->u.rw.nr_segments;

        if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
            unlikely((req->operation != BLKIF_OP_INDIRECT) &&
                     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
            unlikely((req->operation == BLKIF_OP_INDIRECT) &&
                     (nseg > MAX_INDIRECT_SEGMENTS))) {
                pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
                         nseg);
                /* Haven't submitted any bios yet. */
                goto fail_response;
        }

        preq.nr_sects = 0;

        pending_req->blkif     = blkif;
        pending_req->id        = req->u.rw.id;
        pending_req->operation = req_operation;
        pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_pages  = nseg;

        if (req->operation != BLKIF_OP_INDIRECT) {
                preq.dev           = req->u.rw.handle;
                preq.sector_number = req->u.rw.sector_number;
                for (i = 0; i < nseg; i++) {
                        pending_req->grefs[i] = req->u.rw.seg[i].gref;
                        seg[i].nsec = req->u.rw.seg[i].last_sect -
                                req->u.rw.seg[i].first_sect + 1;
                        seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
                        if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
                            (req->u.rw.seg[i].last_sect <
                             req->u.rw.seg[i].first_sect))
                                goto fail_response;
                        preq.nr_sects += seg[i].nsec;
                }
        } else {
                preq.dev           = req->u.indirect.handle;
                preq.sector_number = req->u.indirect.sector_number;
                if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
                        goto fail_response;
        }

        if (xen_vbd_translate(&preq, blkif, operation) != 0) {
                pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
                         operation == READ ? "read" : "write",
                         preq.sector_number,
                         preq.sector_number + preq.nr_sects,
                         blkif->vbd.pdevice);
                goto fail_response;
        }

        /*
         * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
         * is set there.
         */
        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
                    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
                        pr_debug(DRV_PFX "Misaligned I/O request from domain %d\n",
                                 blkif->domid);
                        goto fail_response;
                }
        }

        /*
         * Wait on all outstanding I/O's and once that has been completed
         * issue the WRITE_FLUSH.
         */
        if (drain)
                xen_blk_drain_io(pending_req->blkif);

        /*
         * If we have failed at this point, we need to undo the M2P override,
         * set gnttab_set_unmap_op on all of the grant references and perform
         * the hypercall to unmap the grants - that is all done in
         * xen_blkbk_unmap.
         */
        if (xen_blkbk_map_seg(pending_req, seg, pages))
                goto fail_flush;

        /*
         * The corresponding xen_blkif_put is done in __end_block_io_op.
         */
        xen_blkif_get(blkif);

        for (i = 0; i < nseg; i++) {
                while ((bio == NULL) ||
                       (bio_add_page(bio,
                                     pages[i],
                                     seg[i].nsec << 9,
                                     seg[i].offset) == 0)) {

                        bio = bio_alloc(GFP_KERNEL, nseg-i);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;

                        biolist[nbio++] = bio;
                        bio->bi_bdev    = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                        bio->bi_sector  = preq.sector_number;
                }

                preq.sector_number += seg[i].nsec;
        }

        /* This will be hit if the operation was a flush. */
        if (!bio) {
                BUG_ON(operation != WRITE_FLUSH);

                bio = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
                        goto fail_put_bio;

                biolist[nbio++] = bio;
                bio->bi_bdev    = preq.bdev;
                bio->bi_private = pending_req;
                bio->bi_end_io  = end_block_io_op;
        }

        atomic_set(&pending_req->pendcnt, nbio);
        blk_start_plug(&plug);

        for (i = 0; i < nbio; i++)
                submit_bio(operation, biolist[i]);

        /* Let the I/Os go. */
        blk_finish_plug(&plug);

        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
        else if (operation & WRITE)
                blkif->st_wr_sect += preq.nr_sects;

        return 0;

 fail_flush:
        xen_blkbk_unmap(blkif, pending_req->grant_handles,
                        pending_req->pages, pending_req->persistent_gnts,
                        pending_req->nr_pages);
 fail_response:
        /* Haven't submitted any bios yet. */
        make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
        free_req(blkif, pending_req);
        msleep(1); /* back off a bit */
        return -EIO;

 fail_put_bio:
        for (i = 0; i < nbio; i++)
                bio_put(biolist[i]);
        atomic_set(&pending_req->pendcnt, 1);
        __end_block_io_op(pending_req, -EINVAL);
        msleep(1); /* back off a bit */
        return -EIO;
}

/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
                          unsigned short op, int st)
{
        struct blkif_response resp;
        unsigned long flags;
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        int notify;

        resp.id        = id;
        resp.operation = op;
        resp.status    = st;

        spin_lock_irqsave(&blkif->blk_ring_lock, flags);
        /* Place on the response ring for the relevant domain. */
        switch (blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
                memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_32:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_64:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        default:
                BUG();
        }
        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
        if (notify)
                notify_remote_via_irq(blkif->irq);
}

static int __init xen_blkif_init(void)
{
        int rc = 0;

        if (!xen_domain())
                return -ENODEV;

        rc = xen_blkif_interface_init();
        if (rc)
                goto failed_init;

        rc = xen_blkif_xenbus_init();
        if (rc)
                goto failed_init;

 failed_init:
        return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");