/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"

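/*
 * A write barrier is expressed to the block layer as an ordinary write
 * combined with a cache flush and forced unit access (FUA), which is what
 * the REQ_* combination below selects.
 */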
#define WRITE_BARRIER   (REQ_WRITE | REQ_FLUSH | REQ_FUA)

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int xen_blkif_reqs = 64;
module_param_named(reqs, xen_blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
static unsigned int debug_lvl;
module_param(log_stats, uint, 0644);
module_param(debug_lvl, uint, 0644);

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
        struct blkif_st         *blkif;
        u64                     id;
        int                     nr_pages;
        atomic_t                pendcnt;
        unsigned short          operation;
        int                     status;
        struct list_head        free_list;
};

#define BLKBACK_INVALID_HANDLE (~0)

struct xen_blkbk {
        struct pending_req      *pending_reqs;
        /* List of all 'pending_req' available */
        struct list_head        pending_free;
        /* And its spinlock. */
        spinlock_t              pending_free_lock;
        wait_queue_head_t       pending_free_wq;
        /* The list of all pages that are available. */
        struct page             **pending_pages;
        /* And the grant handles that are available. */
        grant_handle_t          *pending_grant_handles;
};

static struct xen_blkbk *blkbk;

/*
 * Little helpful macro to figure out the index and virtual address of the
 * pending_pages[..]. For each 'pending_req' we have up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
 * 10 and would index into the pending_pages[..].
 */
static inline int vaddr_pagenr(struct pending_req *req, int seg)
{
        return (req - blkbk->pending_reqs) *
                BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]

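/* Kernel virtual address at which the 'seg'th page of 'req' is mapped. */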
static inline unsigned long vaddr(struct pending_req *req, int seg)
{
        unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
        return (unsigned long)pfn_to_kaddr(pfn);
}

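/*
 * The grant handle recorded when the '_seg'th page of '_req' was mapped;
 * consulted (and invalidated) again at unmap time.
 */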
#define pending_handle(_req, _seg) \
        (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])


static int do_block_io_op(struct blkif_st *blkif);
static void dispatch_rw_block_io(struct blkif_st *blkif,
                                 struct blkif_request *req,
                                 struct pending_req *pending_req);
static void make_response(struct blkif_st *blkif, u64 id,
                          unsigned short op, int st);

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(void)
{
        struct pending_req *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&blkbk->pending_free_lock, flags);
        if (!list_empty(&blkbk->pending_free)) {
                req = list_entry(blkbk->pending_free.next, struct pending_req,
                                 free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
        return req;
}

/*
 * Return the 'pending_req' structure back to the free pool. We also
 * wake up the thread if it was waiting for a free request.
 */
static void free_req(struct pending_req *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&blkbk->pending_free_lock, flags);
        was_empty = list_empty(&blkbk->pending_free);
        list_add(&req->free_list, &blkbk->pending_free);
        spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
        if (was_empty)
                wake_up(&blkbk->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */

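/*
 * Check that the request falls within the vbd's extent and honours its
 * read-only setting, then fill in the physical device and block device
 * the I/O should go to.
 */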
static int vbd_translate(struct phys_req *req, struct blkif_st *blkif,
                         int operation)
{
        struct vbd *vbd = &blkif->vbd;
        int rc = -EACCES;

        if ((operation != READ) && vbd->readonly)
                goto out;

        if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
                goto out;

        req->dev  = vbd->pdevice;
        req->bdev = vbd->bdev;
        rc = 0;

 out:
        return rc;
}

static void vbd_resize(struct blkif_st *blkif)
{
        struct vbd *vbd = &blkif->vbd;
        struct xenbus_transaction xbt;
        int err;
        struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
        unsigned long long new_size = vbd_sz(vbd);

        printk(KERN_INFO "VBD Resize: Domid: %d, Device: (%d, %d)\n",
                blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
        printk(KERN_INFO "VBD Resize: new size %llu\n", new_size);
        vbd->size = new_size;
again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                printk(KERN_WARNING "Error starting transaction\n");
                return;
        }
        err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
                            (unsigned long long)vbd_sz(vbd));
        if (err) {
                printk(KERN_WARNING "Error writing new size\n");
                goto abort;
        }
        /*
         * Write the current state; we will use this to synchronize
         * the front-end. If the current state is "connected" the
         * front-end will get the new size information online.
         */
        err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
        if (err) {
                printk(KERN_WARNING "Error writing the state\n");
                goto abort;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
                goto again;
        if (err)
                printk(KERN_WARNING "Error ending transaction\n");
        return;
abort:
        xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct blkif_st *blkif)
{
        blkif->waiting_reqs = 1;
        wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct blkif_st *blkif)
{
        printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d\n",
               current->comm, blkif->st_oo_req,
               blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
        blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        blkif->st_rd_req = 0;
        blkif->st_wr_req = 0;
        blkif->st_oo_req = 0;
        blkif->st_br_req = 0;
}

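/*
 * Main loop of the per-interface kernel thread: wait until the frontend
 * signals work and a 'pending_req' is available, then drain the ring via
 * do_block_io_op(), re-marking the interface busy if more requests remain.
 */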
int xen_blkif_schedule(void *arg)
{
        struct blkif_st *blkif = arg;
        struct vbd *vbd = &blkif->vbd;

        xen_blkif_get(blkif);

        if (debug_lvl)
                printk(KERN_DEBUG "%s: started\n", current->comm);

        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;
                if (unlikely(vbd->size != vbd_sz(vbd)))
                        vbd_resize(blkif);

                wait_event_interruptible(
                        blkif->wq,
                        blkif->waiting_reqs || kthread_should_stop());
                wait_event_interruptible(
                        blkbk->pending_free_wq,
                        !list_empty(&blkbk->pending_free) ||
                        kthread_should_stop());

                blkif->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                if (do_block_io_op(blkif))
                        blkif->waiting_reqs = 1;

                if (log_stats && time_after(jiffies, blkif->st_print))
                        print_stats(blkif);
        }

        if (log_stats)
                print_stats(blkif);
        if (debug_lvl)
                printk(KERN_DEBUG "%s: exiting\n", current->comm);

        blkif->xenblkd = NULL;
        xen_blkif_put(blkif);

        return 0;
}

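/*
 * One segment's worth of I/O: the bus address of its data (including the
 * byte offset of the first sector within the page) and its length in
 * sectors.
 */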
struct seg_buf {
        unsigned long buf;
        unsigned int  nsec;
};

/*
 * Unmap the grant references, and also remove the M2P over-rides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct pending_req *req)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int i, invcount = 0;
        grant_handle_t handle;
        int ret;

        for (i = 0; i < req->nr_pages; i++) {
                handle = pending_handle(req, i);
                if (handle == BLKBACK_INVALID_HANDLE)
                        continue;
                gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
                                    GNTMAP_host_map, handle);
                pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
                invcount++;
        }

        ret = HYPERVISOR_grant_table_op(
                GNTTABOP_unmap_grant_ref, unmap, invcount);
        BUG_ON(ret);
        /*
         * Note, we use invcount, not nr_pages, so we can't index
         * using vaddr(req, i).
         */
        for (i = 0; i < invcount; i++) {
                ret = m2p_remove_override(
                        virt_to_page(unmap[i].host_addr), false);
                if (ret) {
                        printk(KERN_ALERT "Failed to remove M2P override for "
                               "%lx\n", (unsigned long)unmap[i].host_addr);
                        continue;
                }
        }
}

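/*
 * Map the frontend's grant references into this domain and install the
 * matching M2P overrides, recording each grant handle for the later unmap.
 * Returns non-zero if any grant could not be mapped; the caller is then
 * expected to clean up via xen_blkbk_unmap.
 */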
static int xen_blkbk_map(struct blkif_request *req,
                         struct pending_req *pending_req,
                         struct seg_buf seg[])
{
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        int i;
        int nseg = req->nr_segments;
        int ret = 0;

        /*
         * Set up map[..] with the PFN of the page in our domain and the
         * corresponding grant reference for each page in the request.
         */
        for (i = 0; i < nseg; i++) {
                uint32_t flags;

                flags = GNTMAP_host_map;
                if (pending_req->operation != BLKIF_OP_READ)
                        flags |= GNTMAP_readonly;
                gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
                                  req->u.rw.seg[i].gref,
                                  pending_req->blkif->domid);
        }

        ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
        BUG_ON(ret);

        /*
         * Now swizzle the MFN in our domain with the MFN from the other
         * domain so that when we access vaddr(pending_req,i) it has the
         * contents of the page from the other domain.
         */
        for (i = 0; i < nseg; i++) {
                if (unlikely(map[i].status != 0)) {
                        DPRINTK("invalid buffer -- could not remap it\n");
                        map[i].handle = BLKBACK_INVALID_HANDLE;
                        ret |= 1;
                }

                pending_handle(pending_req, i) = map[i].handle;

                if (ret)
                        continue;

                ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
                                       blkbk->pending_page(pending_req, i),
                                       false);
                if (ret) {
                        printk(KERN_ALERT "Failed to install M2P override for"
                               " %lx (ret: %d)\n", (unsigned long)
                               map[i].dev_bus_addr, ret);
                        /* We could switch over to GNTTABOP_copy */
                        continue;
                }

                seg[i].buf = map[i].dev_bus_addr |
                        (req->u.rw.seg[i].first_sect << 9);
        }
        return ret;
}


/*
 * Completion handler shared by all bios of a request.
 */

static void __end_block_io_op(struct pending_req *pending_req, int error)
{
        /* An error fails the entire request. */
        if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
            (error == -EOPNOTSUPP)) {
                DPRINTK("blkback: write barrier op failed, not supported\n");
                xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                DPRINTK("Buffer not up-to-date at end of operation, "
                        "error=%d\n", error);
                pending_req->status = BLKIF_RSP_ERROR;
        }

        /*
         * If all of the bios have completed it is time to unmap
         * the grant references associated with 'request' and provide
         * the proper response on the ring.
         */
        if (atomic_dec_and_test(&pending_req->pendcnt)) {
                xen_blkbk_unmap(pending_req);
                make_response(pending_req->blkif, pending_req->id,
                              pending_req->operation, pending_req->status);
                xen_blkif_put(pending_req->blkif);
                free_req(pending_req);
        }
}

/*
 * bio callback: called as bio->bi_end_io().
 */
static void end_block_io_op(struct bio *bio, int error)
{
        __end_block_io_op(bio->bi_private, error);
        bio_put(bio);
}


/*
 * Function to copy from the ring buffer the 'struct blkif_request'
 * (which has the sectors we want, number of them, grant references, etc),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int do_block_io_op(struct blkif_st *blkif)
{
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        struct blkif_request req;
        struct pending_req *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;

        rc = blk_rings->common.req_cons;
        rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */

        while (rc != rp) {

                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
                        break;

                if (kthread_should_stop()) {
                        more_to_do = 1;
                        break;
                }

                pending_req = alloc_req();
                if (NULL == pending_req) {
                        blkif->st_oo_req++;
                        more_to_do = 1;
                        break;
                }

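                /*
                 * The request layout differs between native, 32-bit and
                 * 64-bit frontends, so copy it through the protocol-specific
                 * view into a private 'struct blkif_request'.
                 */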
                switch (blkif->blk_protocol) {
                case BLKIF_PROTOCOL_NATIVE:
                        memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
                               sizeof(req));
                        break;
                case BLKIF_PROTOCOL_X86_32:
                        blkif_get_x86_32_req(&req,
                                RING_GET_REQUEST(&blk_rings->x86_32, rc));
                        break;
                case BLKIF_PROTOCOL_X86_64:
                        blkif_get_x86_64_req(&req,
                                RING_GET_REQUEST(&blk_rings->x86_64, rc));
                        break;
                default:
                        BUG();
                }
                blk_rings->common.req_cons = ++rc; /* before make_response() */

                /* Apply all sanity checks to /private copy/ of request. */
                barrier();

                switch (req.operation) {
                case BLKIF_OP_READ:
                        blkif->st_rd_req++;
                        dispatch_rw_block_io(blkif, &req, pending_req);
                        break;
                case BLKIF_OP_WRITE_BARRIER:
                        blkif->st_br_req++;
                        /* fall through */
                case BLKIF_OP_WRITE:
                        blkif->st_wr_req++;
                        dispatch_rw_block_io(blkif, &req, pending_req);
                        break;
                default:
                        /*
                         * A good sign something is wrong: sleep for a while to
                         * avoid excessive CPU consumption by a bad guest.
                         */
                        msleep(1);
                        DPRINTK("error: unknown block io operation [%d]\n",
                                req.operation);
                        make_response(blkif, req.id, req.operation,
                                      BLKIF_RSP_ERROR);
                        free_req(pending_req);
                        break;
                }

                /* Yield point for this unbounded loop. */
                cond_resched();
        }

        return more_to_do;
}

/*
 * Transmute the 'struct blkif_request' into a proper 'struct bio'
 * and call 'submit_bio' to hand it to the underlying storage.
 */
static void dispatch_rw_block_io(struct blkif_st *blkif,
                                 struct blkif_request *req,
                                 struct pending_req *pending_req)
{
        struct phys_req preq;
        struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int nseg;
        struct bio *bio = NULL;
        struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        int i, nbio = 0;
        int operation;
        struct blk_plug plug;

        switch (req->operation) {
        case BLKIF_OP_READ:
                operation = READ;
                break;
        case BLKIF_OP_WRITE:
                operation = WRITE_ODIRECT;
                break;
        case BLKIF_OP_WRITE_BARRIER:
                operation = WRITE_BARRIER;
                break;
        default:
                operation = 0; /* make gcc happy */
                BUG();
        }

        /* Check that the number of segments is sane. */
        nseg = req->nr_segments;
        if (unlikely(nseg == 0 && operation != WRITE_BARRIER) ||
            unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
                DPRINTK("Bad number of segments in request (%d)\n", nseg);
                /* Haven't submitted any bio's yet. */
                goto fail_response;
        }

        preq.dev           = req->handle;
        preq.sector_number = req->u.rw.sector_number;
        preq.nr_sects      = 0;

        pending_req->blkif     = blkif;
        pending_req->id        = req->id;
        pending_req->operation = req->operation;
        pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_pages  = nseg;

        for (i = 0; i < nseg; i++) {
                seg[i].nsec = req->u.rw.seg[i].last_sect -
                        req->u.rw.seg[i].first_sect + 1;
                if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
                    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
                        goto fail_response;
                preq.nr_sects += seg[i].nsec;
        }

        if (vbd_translate(&preq, blkif, operation) != 0) {
                DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
                        operation == READ ? "read" : "write",
                        preq.sector_number,
                        preq.sector_number + preq.nr_sects, preq.dev);
                goto fail_response;
        }

        /*
         * This check _MUST_ be done after vbd_translate as the preq.bdev
         * is set there.
         */
        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
                    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
                        DPRINTK("Misaligned I/O request from domain %d\n",
                                blkif->domid);
                        goto fail_response;
                }
        }

        /*
         * If we have failed at this point, we need to undo the M2P override,
         * set gnttab_set_unmap_op on all of the grant references and perform
         * the hypercall to unmap the grants - that is all done in
         * xen_blkbk_unmap.
         */
        if (xen_blkbk_map(req, pending_req, seg))
                goto fail_flush;

        /* The corresponding xen_blkif_put is done in __end_block_io_op. */
        xen_blkif_get(blkif);

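        /*
         * Pack as many consecutive segments as possible into each bio,
         * starting a new one whenever bio_add_page() refuses the page,
         * and remember every bio in biolist[] for submission below.
         */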
        for (i = 0; i < nseg; i++) {
                while ((bio == NULL) ||
                       (bio_add_page(bio,
                                     blkbk->pending_page(pending_req, i),
                                     seg[i].nsec << 9,
                                     seg[i].buf & ~PAGE_MASK) == 0)) {

                        bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;

                        bio->bi_bdev    = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                        bio->bi_sector  = preq.sector_number;
                }

                preq.sector_number += seg[i].nsec;
        }

        /* This will be hit if the operation was a barrier. */
        if (!bio) {
                BUG_ON(operation != WRITE_BARRIER);
                bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
                        goto fail_put_bio;

                bio->bi_bdev    = preq.bdev;
                bio->bi_private = pending_req;
                bio->bi_end_io  = end_block_io_op;
                bio->bi_sector  = -1;
        }


        /*
         * We set the count to nbio up front so that the last submit_bio
         * does not have to call atomic_inc.
         */
        atomic_set(&pending_req->pendcnt, nbio);

        /* Get a reference count for the disk queue and start sending I/O. */
        blk_start_plug(&plug);

        for (i = 0; i < nbio; i++)
                submit_bio(operation, biolist[i]);

        blk_finish_plug(&plug);
        /* Let the I/Os go.. */

        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
        else if (operation == WRITE || operation == WRITE_BARRIER)
                blkif->st_wr_sect += preq.nr_sects;

        return;

 fail_flush:
        xen_blkbk_unmap(pending_req);
 fail_response:
        /* Haven't submitted any bio's yet. */
        make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
        free_req(pending_req);
        msleep(1); /* back off a bit */
        return;

 fail_put_bio:
        for (i = 0; i < (nbio-1); i++)
                bio_put(biolist[i]);
        /* pendcnt was never set on this path; make dec_and_test fire. */
        atomic_set(&pending_req->pendcnt, 1);
        __end_block_io_op(pending_req, -EINVAL);
        msleep(1); /* back off a bit */
        return;
}



/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct blkif_st *blkif, u64 id,
                          unsigned short op, int st)
{
        struct blkif_response resp;
        unsigned long flags;
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        int more_to_do = 0;
        int notify;

        resp.id = id;
        resp.operation = op;
        resp.status = st;

        spin_lock_irqsave(&blkif->blk_ring_lock, flags);
        /* Place on the response ring for the relevant domain. */
        switch (blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
                memcpy(RING_GET_RESPONSE(&blk_rings->native,
                                         blk_rings->native.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_32:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_32,
                                         blk_rings->x86_32.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_64:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_64,
                                         blk_rings->x86_64.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        default:
                BUG();
        }
        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
        if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
                /*
                 * Tail check for pending requests. Allows frontend to avoid
                 * notifications if requests are already in flight (lower
                 * overheads and promotes batching).
                 */
                RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);

        } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
                more_to_do = 1;
        }

        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

        if (more_to_do)
                blkif_notify_work(blkif);
        if (notify)
                notify_remote_via_irq(blkif->irq);
}

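/*
 * Module initialization: allocate the pool of 'pending_req' structures,
 * pages and grant handles shared by all backend instances, then register
 * the interface and xenbus machinery.
 */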
static int __init xen_blkif_init(void)
{
        int i, mmap_pages;
        int rc = 0;

        if (!xen_pv_domain())
                return -ENODEV;

        blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
        if (!blkbk) {
                printk(KERN_ALERT "%s: out of memory!\n", __func__);
                return -ENOMEM;
        }

        mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

        blkbk->pending_reqs          = kmalloc(sizeof(blkbk->pending_reqs[0]) *
                                        xen_blkif_reqs, GFP_KERNEL);
        blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
                                        mmap_pages, GFP_KERNEL);
        blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
                                        mmap_pages, GFP_KERNEL);

        if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
            !blkbk->pending_pages) {
                rc = -ENOMEM;
                goto out_of_memory;
        }

        for (i = 0; i < mmap_pages; i++) {
                blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
                blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
                if (blkbk->pending_pages[i] == NULL) {
                        rc = -ENOMEM;
                        goto out_of_memory;
                }
        }
        rc = xen_blkif_interface_init();
        if (rc)
                goto failed_init;

        memset(blkbk->pending_reqs, 0,
               xen_blkif_reqs * sizeof(blkbk->pending_reqs[0]));

        INIT_LIST_HEAD(&blkbk->pending_free);
        spin_lock_init(&blkbk->pending_free_lock);
        init_waitqueue_head(&blkbk->pending_free_wq);

        for (i = 0; i < xen_blkif_reqs; i++)
                list_add_tail(&blkbk->pending_reqs[i].free_list,
                              &blkbk->pending_free);

        rc = xen_blkif_xenbus_init();
        if (rc)
                goto failed_init;

        return 0;

 out_of_memory:
        printk(KERN_ERR "%s: out of memory\n", __func__);
 failed_init:
        kfree(blkbk->pending_reqs);
        kfree(blkbk->pending_grant_handles);
        if (blkbk->pending_pages) {
                for (i = 0; i < mmap_pages; i++) {
                        if (blkbk->pending_pages[i])
                                __free_page(blkbk->pending_pages[i]);
                }
                kfree(blkbk->pending_pages);
        }
        kfree(blkbk);
        blkbk = NULL;
        return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");