/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a value too low will reduce memory used in each backend,
 * but can have a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but can
 * be set to a lower value that might degrade performance on some intensive
 * IO workloads.
 */

static int xen_blkif_max_buffer_pages = 704;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in each block backend buffer");

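/*
 * Worked example for the default above (an assumption, based on the
 * historical defaults rather than stated in this file): with
 * xen_blkif_reqs = 64 in-flight requests and the ring ABI constant
 * BLKIF_MAX_SEGMENTS_PER_REQUEST = 11, the suggested product is
 * 64 * 11 = 704, which matches the default of 704 used here.
 */
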
/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */

static int xen_blkif_max_pgrants = 352;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
		 "Maximum number of grants to map persistently");

/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive
 * executions of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. This is the percentage of grants to remove on each
 * LRU execution.
 */
#define LRU_PERCENT_CLEAN 5

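/*
 * Example of the arithmetic above, as implemented in purge_persistent_gnt():
 * with the default xen_blkif_max_pgrants of 352, each purge pass targets
 * (352 / 100) * 5 = 15 grants (integer division) in addition to any grants
 * held above the configured maximum.
 */
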
/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to free_xenballooned_pages */
#define NUM_BATCH_FREE_PAGES 10

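/*
 * Pool of unused ballooned pages shared by the helpers below:
 * get_free_page() hands out a page from the pool (falling back to
 * alloc_xenballooned_pages() when the pool is empty), put_free_pages()
 * returns pages to it, and shrink_free_pagepool() releases surplus pages
 * back to the balloon in batches of NUM_BATCH_FREE_PAGES.
 */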
static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	if (list_empty(&blkif->free_pages)) {
		BUG_ON(blkif->free_pages_num != 0);
		spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
		return alloc_xenballooned_pages(1, page, false);
	}
	BUG_ON(blkif->free_pages_num == 0);
	page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
	list_del(&page[0]->lru);
	blkif->free_pages_num--;
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);

	return 0;
}

static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
				  int num)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	for (i = 0; i < num; i++)
		list_add(&page[i]->lru, &blkif->free_pages);
	blkif->free_pages_num += num;
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
}

static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
{
	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
	struct page *page[NUM_BATCH_FREE_PAGES];
	unsigned int num_pages = 0;
	unsigned long flags;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	while (blkif->free_pages_num > num) {
		BUG_ON(list_empty(&blkif->free_pages));
		page[num_pages] = list_first_entry(&blkif->free_pages,
						   struct page, lru);
		list_del(&page[num_pages]->lru);
		blkif->free_pages_num--;
		if (++num_pages == NUM_BATCH_FREE_PAGES) {
			spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
			free_xenballooned_pages(num_pages, page);
			spin_lock_irqsave(&blkif->free_pages_lock, flags);
			num_pages = 0;
		}
	}
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
	if (num_pages != 0)
		free_xenballooned_pages(num_pages, page);
}

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st);

#define foreach_grant_safe(pos, n, rbtree, node) \
	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
	     &(pos)->node != NULL; \
	     (pos) = container_of(n, typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)

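/*
 * Safe traversal of the rb-tree of persistent grants: 'n' caches the next
 * node before the loop body runs, so the body may rb_erase() and kfree()
 * 'pos' without breaking the walk, e.g.:
 *
 *	foreach_grant_safe(persistent_gnt, n, root, node) {
 *		rb_erase(&persistent_gnt->node, root);
 *		kfree(persistent_gnt);
 *	}
 */
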
/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif *blkif,
			      struct persistent_gnt *persistent_gnt)
{
	struct rb_node **new = NULL, *parent = NULL;
	struct persistent_gnt *this;

	if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
		if (!blkif->vbd.overflow_max_grants)
			blkif->vbd.overflow_max_grants = 1;
		return -EBUSY;
	}
	/* Figure out where to put the new node */
	new = &blkif->persistent_gnts.rb_node;
	while (*new) {
		this = container_of(*new, struct persistent_gnt, node);

		parent = *new;
		if (persistent_gnt->gnt < this->gnt)
			new = &((*new)->rb_left);
		else if (persistent_gnt->gnt > this->gnt)
			new = &((*new)->rb_right);
		else {
			pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
			return -EINVAL;
		}
	}

	bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
	set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
	/* Add new node and rebalance tree. */
	rb_link_node(&(persistent_gnt->node), parent, new);
	rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
	blkif->persistent_gnt_c++;
	atomic_inc(&blkif->persistent_gnt_in_use);
	return 0;
}

static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
						 grant_ref_t gref)
{
	struct persistent_gnt *data;
	struct rb_node *node = NULL;

	node = blkif->persistent_gnts.rb_node;
	while (node) {
		data = container_of(node, struct persistent_gnt, node);

		if (gref < data->gnt)
			node = node->rb_left;
		else if (gref > data->gnt)
			node = node->rb_right;
		else {
			if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
				pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
				return NULL;
			}
			set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
			atomic_inc(&blkif->persistent_gnt_in_use);
			return data;
		}
	}
	return NULL;
}

static void put_persistent_gnt(struct xen_blkif *blkif,
			       struct persistent_gnt *persistent_gnt)
{
	if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
		pr_alert_ratelimited(DRV_PFX " freeing a grant already unused\n");
	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
	atomic_dec(&blkif->persistent_gnt_in_use);
}
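
/*
 * Lifecycle of the two flags, as used by the helpers above and by
 * purge_persistent_gnt() below: get_persistent_gnt() sets
 * PERSISTENT_GNT_ACTIVE, put_persistent_gnt() clears it and sets
 * PERSISTENT_GNT_WAS_ACTIVE. The purge skips grants that are currently
 * active, and on its first pass also skips grants used since the last
 * purge (WAS_ACTIVE), so idle grants are evicted first.
 */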

static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
				 unsigned int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	int ret = 0;
	int segs_to_unmap = 0;

	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);
		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			(unsigned long) pfn_to_kaddr(page_to_pfn(
				persistent_gnt->page)),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
		    !rb_next(&persistent_gnt->node)) {
			ret = gnttab_unmap_refs(unmap, NULL, pages,
				segs_to_unmap);
			BUG_ON(ret);
			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}

		rb_erase(&persistent_gnt->node, root);
		kfree(persistent_gnt);
		num--;
	}
	BUG_ON(num != 0);
}

static void unmap_purged_grants(struct work_struct *work)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	int ret, segs_to_unmap = 0;
	struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);

	while (!list_empty(&blkif->persistent_purge_list)) {
		persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
						  struct persistent_gnt,
						  remove_node);
		list_del(&persistent_gnt->remove_node);

		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			vaddr(persistent_gnt->page),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			ret = gnttab_unmap_refs(unmap, NULL, pages,
				segs_to_unmap);
			BUG_ON(ret);
			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}
		kfree(persistent_gnt);
	}
	if (segs_to_unmap > 0) {
		ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
		BUG_ON(ret);
		put_free_pages(blkif, pages, segs_to_unmap);
	}
}

static void purge_persistent_gnt(struct xen_blkif *blkif)
{
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	unsigned int num_clean, total;
	bool scan_used = false;
	struct rb_root *root;

	if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
	    (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
	    !blkif->vbd.overflow_max_grants)) {
		return;
	}

	if (work_pending(&blkif->persistent_purge_work)) {
		pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
		return;
	}

	num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
	num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
	num_clean = min(blkif->persistent_gnt_c, num_clean);
	if (num_clean >
	    (blkif->persistent_gnt_c -
	    atomic_read(&blkif->persistent_gnt_in_use)))
		return;

	/*
	 * At this point we can be sure that there will be no calls
	 * to get_persistent_gnt (because we are executing this code from
	 * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
	 * which means that the number of currently used grants will go down,
	 * but never up, so we will always be able to remove the requested
	 * number of grants.
	 */

	total = num_clean;

	pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);

	INIT_LIST_HEAD(&blkif->persistent_purge_list);
	root = &blkif->persistent_gnts;
purge_list:
	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);

		if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
			continue;
		if (!scan_used &&
		    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
			continue;

		rb_erase(&persistent_gnt->node, root);
		list_add(&persistent_gnt->remove_node,
			 &blkif->persistent_purge_list);
		if (--num_clean == 0)
			goto finished;
	}
	/*
	 * If we get here it means we also need to start cleaning
	 * grants that were used since the last purge in order to cope
	 * with the requested number.
	 */
	if (!scan_used) {
		pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
		scan_used = true;
		goto purge_list;
	}
finished:
	/* Remove the "used" flag from all the persistent grants */
	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);
		clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
	}
	blkif->persistent_gnt_c -= (total - num_clean);
	blkif->vbd.overflow_max_grants = 0;

	/* We can defer this work */
	INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
	schedule_work(&blkif->persistent_purge_work);
	pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
	return;
}
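
/*
 * Worked example of the sizing above: with xen_blkif_max_pgrants = 352 and
 * persistent_gnt_c = 360, num_clean starts at (352 / 100) * 5 = 15, becomes
 * 360 - 352 + 15 = 23, and is capped at min(360, 23) = 23. The purge then
 * only proceeds if at least 23 grants are not currently in use.
 */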

/*
 * Retrieve a free pending_req from the 'pending_free' list to be used.
 */
static struct pending_req *alloc_req(struct xen_blkif *blkif)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkif->pending_free_lock, flags);
	if (!list_empty(&blkif->pending_free)) {
		req = list_entry(blkif->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the free pool. We also
 * wake up the thread if it was waiting for a free pending_req.
 */
static void free_req(struct xen_blkif *blkif, struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkif->pending_free_lock, flags);
	was_empty = list_empty(&blkif->pending_free);
	list_add(&req->free_list, &blkif->pending_free);
	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkif->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
			     int operation)
{
	struct xen_vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != READ) && vbd->readonly)
		goto out;

	if (likely(req->nr_sects)) {
		blkif_sector_t end = req->sector_number + req->nr_sects;

		if (unlikely(end < req->sector_number))
			goto out;
		if (unlikely(end > vbd_sz(vbd)))
			goto out;
	}

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}
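
/*
 * Note on the checks above: 'end' is computed in 64-bit sector units
 * (blkif_sector_t), so 'end < req->sector_number' catches wrap-around of
 * the addition, and 'end > vbd_sz(vbd)' catches requests that run past the
 * end of the device; either case fails the translation with -EACCES.
 */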

static void xen_vbd_resize(struct xen_blkif *blkif)
{
	struct xen_vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn(DRV_PFX "Error starting transaction\n");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		pr_warn(DRV_PFX "Error writing new size\n");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		pr_warn(DRV_PFX "Error writing the state\n");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		pr_warn(DRV_PFX "Error ending transaction\n");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif *blkif)
{
	pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
		 " | ds %4llu | pg: %4u/%4d\n",
		 current->comm, blkif->st_oo_req,
		 blkif->st_rd_req, blkif->st_wr_req,
		 blkif->st_f_req, blkif->st_ds_req,
		 blkif->persistent_gnt_c,
		 xen_blkif_max_pgrants);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
	blkif->st_ds_req = 0;
}

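/*
 * Main per-backend kthread. One timeout budget of LRU_INTERVAL ms is shared
 * by the two waits below (for ring activity and for a free pending_req), so
 * the thread wakes up at least every LRU_INTERVAL ms to run the
 * persistent-grant purge and to shrink the free-page pool, even when the
 * ring is idle.
 */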
int xen_blkif_schedule(void *arg)
{
	struct xen_blkif *blkif = arg;
	struct xen_vbd *vbd = &blkif->vbd;
	unsigned long timeout;

	xen_blkif_get(blkif);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			xen_vbd_resize(blkif);

		timeout = msecs_to_jiffies(LRU_INTERVAL);

		timeout = wait_event_interruptible_timeout(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;
		timeout = wait_event_interruptible_timeout(
			blkif->pending_free_wq,
			!list_empty(&blkif->pending_free) ||
			kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;

purge_gnt_list:
		if (blkif->vbd.feature_gnt_persistent &&
		    time_after(jiffies, blkif->next_lru)) {
			purge_persistent_gnt(blkif);
			blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
		}

		/* Shrink if we have more than xen_blkif_max_buffer_pages */
		shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	/* Since we are shutting down remove all pages from the buffer */
	shrink_free_pagepool(blkif, 0 /* All */);

	/* Free all persistent grant pages */
	if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
		free_persistent_gnts(blkif, &blkif->persistent_gnts,
			blkif->persistent_gnt_c);

	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
	blkif->persistent_gnt_c = 0;

	if (log_stats)
		print_stats(blkif);

	blkif->xenblkd = NULL;
	xen_blkif_put(blkif);

	return 0;
}

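/*
 * Per-segment I/O description: 'offset' is the byte offset of the segment
 * within its page (derived from first_sect << 9 in xen_blkbk_map_seg()),
 * and 'nsec' is the segment length in 512-byte sectors.
 */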
struct seg_buf {
	unsigned int offset;
	unsigned int nsec;
};
/*
 * Unmap the grant references, and also remove the M2P overrides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct xen_blkif *blkif,
			    grant_handle_t handles[],
			    struct page *pages[],
			    struct persistent_gnt *persistent_gnts[],
			    int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	int ret;

	for (i = 0; i < num; i++) {
		if (persistent_gnts[i] != NULL) {
			put_persistent_gnt(blkif, persistent_gnts[i]);
			continue;
		}
		if (handles[i] == BLKBACK_INVALID_HANDLE)
			continue;
		unmap_pages[invcount] = pages[i];
		gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]),
				    GNTMAP_host_map, handles[i]);
		handles[i] = BLKBACK_INVALID_HANDLE;
		if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
						invcount);
			BUG_ON(ret);
			put_free_pages(blkif, unmap_pages, invcount);
			invcount = 0;
		}
	}
	if (invcount) {
		ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
		BUG_ON(ret);
		put_free_pages(blkif, unmap_pages, invcount);
	}
}

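/*
 * Map the grant references in grefs[] into this domain. Grants that are
 * already mapped persistently are reused; new mappings are batched and
 * submitted to gnttab_map_refs() in chunks of up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST (the again/last_map/map_until loop), and
 * are promoted to persistent grants while there is room below
 * xen_blkif_max_pgrants. If an individual grant fails to map, its handle
 * is set to BLKBACK_INVALID_HANDLE and a non-zero value is returned.
 */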
static int xen_blkbk_map(struct xen_blkif *blkif, grant_ref_t grefs[],
			 struct persistent_gnt *persistent_gnts[],
			 grant_handle_t handles[],
			 struct page *pages[],
			 int num, bool ro)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt = NULL;
	phys_addr_t addr = 0;
	int i, seg_idx, new_map_idx;
	int segs_to_map = 0;
	int ret = 0;
	int last_map = 0, map_until = 0;
	int use_persistent_gnts;

	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

	/*
	 * Fill out map[..] with the PFN of the page in our domain and the
	 * corresponding grant reference for each page that is not already
	 * mapped persistently.
	 */
again:
	for (i = map_until; i < num; i++) {
		uint32_t flags;

		if (use_persistent_gnts)
			persistent_gnt = get_persistent_gnt(
				blkif,
				grefs[i]);

		if (persistent_gnt) {
			/*
			 * We are using persistent grants and
			 * the grant is already mapped
			 */
			pages[i] = persistent_gnt->page;
			persistent_gnts[i] = persistent_gnt;
		} else {
			if (get_free_page(blkif, &pages[i]))
				goto out_of_memory;
			addr = vaddr(pages[i]);
			pages_to_gnt[segs_to_map] = pages[i];
			persistent_gnts[i] = NULL;
			flags = GNTMAP_host_map;
			if (!use_persistent_gnts && ro)
				flags |= GNTMAP_readonly;
			gnttab_set_map_op(&map[segs_to_map++], addr,
					  flags, grefs[i],
					  blkif->domid);
		}
		map_until = i + 1;
		if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
			break;
	}

	if (segs_to_map) {
		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
		BUG_ON(ret);
	}

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
		if (!persistent_gnts[seg_idx]) {
			/* This is a newly mapped grant */
			BUG_ON(new_map_idx >= segs_to_map);
			if (unlikely(map[new_map_idx].status != 0)) {
				pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
				handles[seg_idx] = BLKBACK_INVALID_HANDLE;
				ret |= 1;
				goto next;
			}
			handles[seg_idx] = map[new_map_idx].handle;
		} else {
			continue;
		}
		if (use_persistent_gnts &&
		    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
			/*
			 * We are using persistent grants, the grant is
			 * not mapped but we might have room for it.
			 */
			persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
						 GFP_KERNEL);
			if (!persistent_gnt) {
				/*
				 * If we don't have enough memory to
				 * allocate the persistent_gnt struct
				 * map this grant non-persistently
				 */
				goto next;
			}
			persistent_gnt->gnt = map[new_map_idx].ref;
			persistent_gnt->handle = map[new_map_idx].handle;
			persistent_gnt->page = pages[seg_idx];
			if (add_persistent_gnt(blkif,
					       persistent_gnt)) {
				kfree(persistent_gnt);
				persistent_gnt = NULL;
				goto next;
			}
			persistent_gnts[seg_idx] = persistent_gnt;
			pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
				 persistent_gnt->gnt, blkif->persistent_gnt_c,
				 xen_blkif_max_pgrants);
			goto next;
		}
		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
			blkif->vbd.overflow_max_grants = 1;
			pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
				 blkif->domid, blkif->vbd.handle);
		}
		/*
		 * We could not map this grant persistently, so use it as
		 * a non-persistent grant.
		 */
next:
		new_map_idx++;
	}
	segs_to_map = 0;
	last_map = map_until;
	if (map_until != num)
		goto again;

	return ret;

out_of_memory:
	pr_alert(DRV_PFX "%s: out of memory\n", __func__);
	put_free_pages(blkif, pages_to_gnt, segs_to_map);
	return -ENOMEM;
}

static int xen_blkbk_map_seg(struct blkif_request *req,
			     struct pending_req *pending_req,
			     struct seg_buf seg[],
			     struct page *pages[])
{
	int i, rc;
	grant_ref_t grefs[BLKIF_MAX_SEGMENTS_PER_REQUEST];

	for (i = 0; i < req->u.rw.nr_segments; i++)
		grefs[i] = req->u.rw.seg[i].gref;

	rc = xen_blkbk_map(pending_req->blkif, grefs,
			   pending_req->persistent_gnts,
			   pending_req->grant_handles, pending_req->pages,
			   req->u.rw.nr_segments,
			   (pending_req->operation != BLKIF_OP_READ));
	if (rc)
		return rc;

	for (i = 0; i < req->u.rw.nr_segments; i++)
		seg[i].offset = (req->u.rw.seg[i].first_sect << 9);

	return 0;
}

static int dispatch_discard_io(struct xen_blkif *blkif,
			       struct blkif_request *req)
{
	int err = 0;
	int status = BLKIF_RSP_OKAY;
	struct block_device *bdev = blkif->vbd.bdev;
	unsigned long secure;

	blkif->st_ds_req++;

	xen_blkif_get(blkif);
	secure = (blkif->vbd.discard_secure &&
		 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
		 BLKDEV_DISCARD_SECURE : 0;

	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
				   req->u.discard.nr_sectors,
				   GFP_KERNEL, secure);

	if (err == -EOPNOTSUPP) {
		pr_debug(DRV_PFX "discard op failed, not supported\n");
		status = BLKIF_RSP_EOPNOTSUPP;
	} else if (err)
		status = BLKIF_RSP_ERROR;

	make_response(blkif, req->u.discard.id, req->operation, status);
	xen_blkif_put(blkif);
	return err;
}

static int dispatch_other_io(struct xen_blkif *blkif,
			     struct blkif_request *req,
			     struct pending_req *pending_req)
{
	free_req(blkif, pending_req);
	make_response(blkif, req->u.other.id, req->operation,
		      BLKIF_RSP_EOPNOTSUPP);
	return -EIO;
}

static void xen_blk_drain_io(struct xen_blkif *blkif)
{
	atomic_set(&blkif->drain, 1);
	do {
		/* The initial value is one, and one refcnt taken at the
		 * start of the xen_blkif_schedule thread. */
		if (atomic_read(&blkif->refcnt) <= 2)
			break;
		wait_for_completion_interruptible_timeout(
				&blkif->drain_complete, HZ);

		if (!atomic_read(&blkif->drain))
			break;
	} while (!kthread_should_stop());
	atomic_set(&blkif->drain, 0);
}

/*
 * Completion callback on the bio's. Called as bio->bi_end_io().
 */

static void __end_block_io_op(struct pending_req *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
	    (error == -EOPNOTSUPP)) {
		pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
		   (error == -EOPNOTSUPP)) {
		pr_debug(DRV_PFX "write barrier op failed, not supported\n");
		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
			 " error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bio's have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		xen_blkbk_unmap(pending_req->blkif, pending_req->grant_handles,
				pending_req->pages,
				pending_req->persistent_gnts,
				pending_req->nr_pages);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		xen_blkif_put(pending_req->blkif);
		if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
			if (atomic_read(&pending_req->blkif->drain))
				complete(&pending_req->blkif->drain_complete);
		}
		free_req(pending_req->blkif, pending_req);
	}
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}

/*
 * Function to copy from the ring buffer the 'struct blkif_request'
 * (which has the sectors we want, number of them, grant references, etc),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req(blkif);
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		case BLKIF_OP_WRITE_BARRIER:
		case BLKIF_OP_FLUSH_DISKCACHE:
			if (dispatch_rw_block_io(blkif, &req, pending_req))
				goto done;
			break;
		case BLKIF_OP_DISCARD:
			free_req(blkif, pending_req);
			if (dispatch_discard_io(blkif, &req))
				goto done;
			break;
		default:
			if (dispatch_other_io(blkif, &req, pending_req))
				goto done;
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}
done:
	return more_to_do;
}

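/*
 * Wrapper around __do_block_io_op: RING_FINAL_CHECK_FOR_REQUESTS re-checks
 * the ring after re-enabling notifications, so a request queued by the
 * frontend between the last check and the next wait is not missed.
 */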
static int
do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do;

	do {
		more_to_do = __do_block_io_op(blkif);
		if (more_to_do)
			break;

		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} while (more_to_do);

	return more_to_do;
}
/*
 * Transmute the 'struct blkif_request' into a proper 'struct bio'
 * and call 'submit_bio' to pass it to the underlying storage.
 */
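/*
 * Mapping from ring operations to block-layer operations, as implemented
 * in the switch below: BLKIF_OP_READ -> READ, BLKIF_OP_WRITE ->
 * WRITE_ODIRECT, BLKIF_OP_FLUSH_DISKCACHE -> WRITE_FLUSH, and
 * BLKIF_OP_WRITE_BARRIER -> WRITE_FLUSH preceded by a drain of all
 * in-flight I/O (the 'drain' flag).
 */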
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int i, nbio = 0;
	int operation;
	struct blk_plug plug;
	bool drain = false;
	struct page **pages = pending_req->pages;

	switch (req->operation) {
	case BLKIF_OP_READ:
		blkif->st_rd_req++;
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		blkif->st_wr_req++;
		operation = WRITE_ODIRECT;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		drain = true;
		/* fall through */
	case BLKIF_OP_FLUSH_DISKCACHE:
		blkif->st_f_req++;
		operation = WRITE_FLUSH;
		break;
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
		break;
	}

	/* Check that the number of segments is sane. */
	nseg = req->u.rw.nr_segments;

	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
			 nseg);
		/* Haven't submitted any bio's yet. */
		goto fail_response;
	}

	preq.sector_number = req->u.rw.sector_number;
	preq.nr_sects      = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->u.rw.id;
	pending_req->operation = req->operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_pages  = nseg;

	for (i = 0; i < nseg; i++) {
		seg[i].nsec = req->u.rw.seg[i].last_sect -
			req->u.rw.seg[i].first_sect + 1;
		if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
			goto fail_response;
		preq.nr_sects += seg[i].nsec;
	}

	if (xen_vbd_translate(&preq, blkif, operation) != 0) {
		pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
			 operation == READ ? "read" : "write",
			 preq.sector_number,
			 preq.sector_number + preq.nr_sects,
			 blkif->vbd.pdevice);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug(DRV_PFX "Misaligned I/O request from domain %d\n",
				 blkif->domid);
			goto fail_response;
		}
	}

	/*
	 * Wait on all outstanding I/O's and once that has been completed
	 * issue the WRITE_FLUSH.
	 */
	if (drain)
		xen_blk_drain_io(pending_req->blkif);

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map_seg(req, pending_req, seg, pages))
		goto fail_flush;

	/*
	 * This corresponding xen_blkif_put is done in __end_block_io_op, or
	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
	 */
	xen_blkif_get(blkif);

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     pages[i],
				     seg[i].nsec << 9,
				     seg[i].offset) == 0)) {

			bio = bio_alloc(GFP_KERNEL, nseg-i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			biolist[nbio++] = bio;
			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_sector  = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a flush or discard. */
	if (!bio) {
		BUG_ON(operation != WRITE_FLUSH);

		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		biolist[nbio++] = bio;
		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
	}

	atomic_set(&pending_req->pendcnt, nbio);
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(operation, biolist[i]);

	/* Let the I/Os go.. */
	blk_finish_plug(&plug);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation & WRITE)
		blkif->st_wr_sect += preq.nr_sects;

	return 0;

 fail_flush:
	xen_blkbk_unmap(blkif, pending_req->grant_handles,
			pending_req->pages, pending_req->persistent_gnts,
			pending_req->nr_pages);
 fail_response:
	/* Haven't submitted any bio's yet. */
	make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR);
	free_req(blkif, pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
	atomic_set(&pending_req->pendcnt, 1);
	__end_block_io_op(pending_req, -EINVAL);
	msleep(1); /* back off a bit */
	return -EIO;
}

/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response resp;
	unsigned long flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}

static int __init xen_blkif_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

 failed_init:
	return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");