rbd: rbd_dev_header_unwatch_sync() variant
drivers/block/rbd.c
2 /*
3 rbd.c -- Export ceph rados objects as a Linux block device
4
5
6 based on drivers/block/osdblk.c:
7
8 Copyright 2009 Red Hat, Inc.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING. If not, write to
21 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
22
23
24
25 For usage instructions, please refer to:
26
27 Documentation/ABI/testing/sysfs-bus-rbd
28
29 */
30
31 #include <linux/ceph/libceph.h>
32 #include <linux/ceph/osd_client.h>
33 #include <linux/ceph/mon_client.h>
34 #include <linux/ceph/decode.h>
35 #include <linux/parser.h>
36 #include <linux/bsearch.h>
37
38 #include <linux/kernel.h>
39 #include <linux/device.h>
40 #include <linux/module.h>
41 #include <linux/blk-mq.h>
42 #include <linux/fs.h>
43 #include <linux/blkdev.h>
44 #include <linux/slab.h>
45 #include <linux/idr.h>
46 #include <linux/workqueue.h>
47
48 #include "rbd_types.h"
49
50 #define RBD_DEBUG /* Activate rbd_assert() calls */
51
52 /*
53 * The basic unit of block I/O is a sector. It is interpreted in a
54 * number of contexts in Linux (blk, bio, genhd), but the default is
55 * universally 512 bytes. These symbols are just slightly more
56 * meaningful than the bare numbers they represent.
57 */
58 #define SECTOR_SHIFT 9
59 #define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
60
61 /*
62 * Increment the given counter and return its updated value.
63 * If the counter is already 0 it will not be incremented.
64  * If the counter is already at its maximum value, -EINVAL is
65  * returned without updating it.
66 */
67 static int atomic_inc_return_safe(atomic_t *v)
68 {
69 unsigned int counter;
70
71 counter = (unsigned int)__atomic_add_unless(v, 1, 0);
72 if (counter <= (unsigned int)INT_MAX)
73 return (int)counter;
74
75 atomic_dec(v);
76
77 return -EINVAL;
78 }
79
80 /* Decrement the counter. Return the resulting value, or -EINVAL */
81 static int atomic_dec_return_safe(atomic_t *v)
82 {
83 int counter;
84
85 counter = atomic_dec_return(v);
86 if (counter >= 0)
87 return counter;
88
89 atomic_inc(v);
90
91 return -EINVAL;
92 }
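
/*
 * Illustrative sketch (not part of the driver): the pair above gives a
 * saturating reference count.  Assuming a counter initialized to 1, and
 * with take_reference()/handle_saturation() as hypothetical callers:
 *
 *	atomic_t refs = ATOMIC_INIT(1);
 *
 *	if (atomic_inc_return_safe(&refs) > 0)
 *		take_reference();	  -- increment succeeded; pair with
 *					  -- atomic_dec_return_safe() later
 *	else
 *		handle_saturation();	  -- counter was 0 or saturated
 */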
93
94 #define RBD_DRV_NAME "rbd"
95
96 #define RBD_MINORS_PER_MAJOR 256
97 #define RBD_SINGLE_MAJOR_PART_SHIFT 4
98
99 #define RBD_MAX_PARENT_CHAIN_LEN 16
100
101 #define RBD_SNAP_DEV_NAME_PREFIX "snap_"
102 #define RBD_MAX_SNAP_NAME_LEN \
103 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
104
105 #define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
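
/*
 * Rough arithmetic behind the 510 limit (a sketch, not authoritative):
 * each snapshot id is a __le64, so 510 ids occupy 510 * 8 = 4080 bytes,
 * leaving a small margin for the struct ceph_snap_context header within
 * a single 4 KiB allocation.
 */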
106
107 #define RBD_SNAP_HEAD_NAME "-"
108
109 #define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */
110
111 /* This allows a single page to hold an image name sent by the OSD */
112 #define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
113 #define RBD_IMAGE_ID_LEN_MAX 64
114
115 #define RBD_OBJ_PREFIX_LEN_MAX 64
116
117 /* Feature bits */
118
119 #define RBD_FEATURE_LAYERING (1<<0)
120 #define RBD_FEATURE_STRIPINGV2 (1<<1)
121 #define RBD_FEATURES_ALL \
122 (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)
123
124 /* Features supported by this (client software) implementation. */
125
126 #define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL)
127
128 /*
129 * An RBD device name will be "rbd#", where the "rbd" comes from
130 * RBD_DRV_NAME above, and # is a unique integer identifier.
131 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
132 * enough to hold all possible device names.
133 */
134 #define DEV_NAME_LEN 32
135 #define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1)
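
/*
 * Worked example of the bound above: with a 4-byte int,
 * (5 * 4) / 2 + 1 = 11 characters, exactly enough for the longest
 * decimal rendering "-2147483648", so DEV_NAME_LEN (32) comfortably
 * holds "rbd" plus any device id.
 */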
136
137 /*
138 * block device image metadata (in-memory version)
139 */
140 struct rbd_image_header {
141 /* These six fields never change for a given rbd image */
142 char *object_prefix;
143 __u8 obj_order;
144 __u8 crypt_type;
145 __u8 comp_type;
146 u64 stripe_unit;
147 u64 stripe_count;
148 u64 features; /* Might be changeable someday? */
149
150 /* The remaining fields need to be updated occasionally */
151 u64 image_size;
152 struct ceph_snap_context *snapc;
153 char *snap_names; /* format 1 only */
154 u64 *snap_sizes; /* format 1 only */
155 };
156
157 /*
158 * An rbd image specification.
159 *
160 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
161 * identify an image. Each rbd_dev structure includes a pointer to
162 * an rbd_spec structure that encapsulates this identity.
163 *
164 * Each of the id's in an rbd_spec has an associated name. For a
165 * user-mapped image, the names are supplied and the id's associated
166 * with them are looked up. For a layered image, a parent image is
167 * defined by the tuple, and the names are looked up.
168 *
169 * An rbd_dev structure contains a parent_spec pointer which is
170 * non-null if the image it represents is a child in a layered
171 * image. This pointer will refer to the rbd_spec structure used
172 * by the parent rbd_dev for its own identity (i.e., the structure
173 * is shared between the parent and child).
174 *
175 * Since these structures are populated once, during the discovery
176 * phase of image construction, they are effectively immutable so
177 * we make no effort to synchronize access to them.
178 *
179 * Note that code herein does not assume the image name is known (it
180 * could be a null pointer).
181 */
182 struct rbd_spec {
183 u64 pool_id;
184 const char *pool_name;
185
186 const char *image_id;
187 const char *image_name;
188
189 u64 snap_id;
190 const char *snap_name;
191
192 struct kref kref;
193 };
194
195 /*
196 * an instance of the client. multiple devices may share an rbd client.
197 */
198 struct rbd_client {
199 struct ceph_client *client;
200 struct kref kref;
201 struct list_head node;
202 };
203
204 struct rbd_img_request;
205 typedef void (*rbd_img_callback_t)(struct rbd_img_request *);
206
207 #define BAD_WHICH U32_MAX /* Good which or bad which, which? */
208
209 struct rbd_obj_request;
210 typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
211
212 enum obj_request_type {
213 OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
214 };
215
216 enum obj_operation_type {
217 OBJ_OP_WRITE,
218 OBJ_OP_READ,
219 OBJ_OP_DISCARD,
220 };
221
222 enum obj_req_flags {
223 OBJ_REQ_DONE, /* completion flag: not done = 0, done = 1 */
224 OBJ_REQ_IMG_DATA, /* object usage: standalone = 0, image = 1 */
225 OBJ_REQ_KNOWN, /* EXISTS flag valid: no = 0, yes = 1 */
226 OBJ_REQ_EXISTS, /* target exists: no = 0, yes = 1 */
227 };
228
229 struct rbd_obj_request {
230 const char *object_name;
231 u64 offset; /* object start byte */
232 u64 length; /* bytes from offset */
233 unsigned long flags;
234
235 /*
236 * An object request associated with an image will have its
237 * img_data flag set; a standalone object request will not.
238 *
239 * A standalone object request will have which == BAD_WHICH
240 * and a null obj_request pointer.
241 *
242 * An object request initiated in support of a layered image
243 * object (to check for its existence before a write) will
244 * have which == BAD_WHICH and a non-null obj_request pointer.
245 *
246 * Finally, an object request for rbd image data will have
247 * which != BAD_WHICH, and will have a non-null img_request
248 * pointer. The value of which will be in the range
249 * 0..(img_request->obj_request_count-1).
250 */
251 union {
252 struct rbd_obj_request *obj_request; /* STAT op */
253 struct {
254 struct rbd_img_request *img_request;
255 u64 img_offset;
256 /* links for img_request->obj_requests list */
257 struct list_head links;
258 };
259 };
260 u32 which; /* posn image request list */
261
262 enum obj_request_type type;
263 union {
264 struct bio *bio_list;
265 struct {
266 struct page **pages;
267 u32 page_count;
268 };
269 };
270 struct page **copyup_pages;
271 u32 copyup_page_count;
272
273 struct ceph_osd_request *osd_req;
274
275 u64 xferred; /* bytes transferred */
276 int result;
277
278 rbd_obj_callback_t callback;
279 struct completion completion;
280
281 struct kref kref;
282 };
283
284 enum img_req_flags {
285 IMG_REQ_WRITE, /* I/O direction: read = 0, write = 1 */
286 IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */
287 IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */
288 IMG_REQ_DISCARD, /* discard: normal = 0, discard request = 1 */
289 };
290
291 struct rbd_img_request {
292 struct rbd_device *rbd_dev;
293 u64 offset; /* starting image byte offset */
294 u64 length; /* byte count from offset */
295 unsigned long flags;
296 union {
297 u64 snap_id; /* for reads */
298 struct ceph_snap_context *snapc; /* for writes */
299 };
300 union {
301 struct request *rq; /* block request */
302 struct rbd_obj_request *obj_request; /* obj req initiator */
303 };
304 struct page **copyup_pages;
305 u32 copyup_page_count;
306 spinlock_t completion_lock;/* protects next_completion */
307 u32 next_completion;
308 rbd_img_callback_t callback;
309 u64 xferred;/* aggregate bytes transferred */
310 int result; /* first nonzero obj_request result */
311
312 u32 obj_request_count;
313 struct list_head obj_requests; /* rbd_obj_request structs */
314
315 struct kref kref;
316 };
317
318 #define for_each_obj_request(ireq, oreq) \
319 list_for_each_entry(oreq, &(ireq)->obj_requests, links)
320 #define for_each_obj_request_from(ireq, oreq) \
321 list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
322 #define for_each_obj_request_safe(ireq, oreq, n) \
323 list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
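
/*
 * Illustrative sketch (not part of the driver): these wrappers around
 * list_for_each_entry() and friends are used like so, with handle()
 * standing in for a hypothetical per-object callback:
 *
 *	struct rbd_obj_request *obj_request;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		handle(obj_request);
 *
 * The _safe variant walks the list in reverse and tolerates removal of
 * the current entry, which is what teardown paths need.
 */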
324
325 struct rbd_mapping {
326 u64 size;
327 u64 features;
328 bool read_only;
329 };
330
331 /*
332 * a single device
333 */
334 struct rbd_device {
335 int dev_id; /* blkdev unique id */
336
337 int major; /* blkdev assigned major */
338 int minor;
339 struct gendisk *disk; /* blkdev's gendisk and rq */
340
341 u32 image_format; /* Either 1 or 2 */
342 struct rbd_client *rbd_client;
343
344 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
345
346 spinlock_t lock; /* queue, flags, open_count */
347
348 struct rbd_image_header header;
349 unsigned long flags; /* possibly lock protected */
350 struct rbd_spec *spec;
351 struct rbd_options *opts;
352
353 struct ceph_object_id header_oid;
354
355 struct ceph_file_layout layout;
356
357 struct ceph_osd_event *watch_event;
358 struct rbd_obj_request *watch_request;
359
360 struct rbd_spec *parent_spec;
361 u64 parent_overlap;
362 atomic_t parent_ref;
363 struct rbd_device *parent;
364
365 /* Block layer tags. */
366 struct blk_mq_tag_set tag_set;
367
368 /* protects updating the header */
369 struct rw_semaphore header_rwsem;
370
371 struct rbd_mapping mapping;
372
373 struct list_head node;
374
375 /* sysfs related */
376 struct device dev;
377 unsigned long open_count; /* protected by lock */
378 };
379
380 /*
381 * Flag bits for rbd_dev->flags. If atomicity is required,
382 * rbd_dev->lock is used to protect access.
383 *
384 * Currently, only the "removing" flag (which is coupled with the
385 * "open_count" field) requires atomic access.
386 */
387 enum rbd_dev_flags {
388 RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
389 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
390 };
391
392 static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
393
394 static LIST_HEAD(rbd_dev_list); /* devices */
395 static DEFINE_SPINLOCK(rbd_dev_list_lock);
396
397 static LIST_HEAD(rbd_client_list); /* clients */
398 static DEFINE_SPINLOCK(rbd_client_list_lock);
399
400 /* Slab caches for frequently-allocated structures */
401
402 static struct kmem_cache *rbd_img_request_cache;
403 static struct kmem_cache *rbd_obj_request_cache;
404 static struct kmem_cache *rbd_segment_name_cache;
405
406 static int rbd_major;
407 static DEFINE_IDA(rbd_dev_id_ida);
408
409 static struct workqueue_struct *rbd_wq;
410
411 /*
412  * Default to false for now, as single-major requires version >= 0.75 of
413  * the userspace rbd utility.
414 */
415 static bool single_major = false;
416 module_param(single_major, bool, S_IRUGO);
417 MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
418
419 static int rbd_img_request_submit(struct rbd_img_request *img_request);
420
421 static ssize_t rbd_add(struct bus_type *bus, const char *buf,
422 size_t count);
423 static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
424 size_t count);
425 static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
426 size_t count);
427 static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
428 size_t count);
429 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
430 static void rbd_spec_put(struct rbd_spec *spec);
431
432 static int rbd_dev_id_to_minor(int dev_id)
433 {
434 return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
435 }
436
437 static int minor_to_rbd_dev_id(int minor)
438 {
439 return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
440 }
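
/*
 * Worked example (single-major mode): with RBD_SINGLE_MAJOR_PART_SHIFT
 * of 4, dev_id 3 maps to minor 3 << 4 = 48.  Minors 48..63 then cover
 * the whole device plus up to 15 partitions, and minor_to_rbd_dev_id()
 * maps any of them back to dev_id 3.
 */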
441
442 static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
443 static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
444 static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
445 static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
446
447 static struct attribute *rbd_bus_attrs[] = {
448 &bus_attr_add.attr,
449 &bus_attr_remove.attr,
450 &bus_attr_add_single_major.attr,
451 &bus_attr_remove_single_major.attr,
452 NULL,
453 };
454
455 static umode_t rbd_bus_is_visible(struct kobject *kobj,
456 struct attribute *attr, int index)
457 {
458 if (!single_major &&
459 (attr == &bus_attr_add_single_major.attr ||
460 attr == &bus_attr_remove_single_major.attr))
461 return 0;
462
463 return attr->mode;
464 }
465
466 static const struct attribute_group rbd_bus_group = {
467 .attrs = rbd_bus_attrs,
468 .is_visible = rbd_bus_is_visible,
469 };
470 __ATTRIBUTE_GROUPS(rbd_bus);
471
472 static struct bus_type rbd_bus_type = {
473 .name = "rbd",
474 .bus_groups = rbd_bus_groups,
475 };
476
477 static void rbd_root_dev_release(struct device *dev)
478 {
479 }
480
481 static struct device rbd_root_dev = {
482 .init_name = "rbd",
483 .release = rbd_root_dev_release,
484 };
485
486 static __printf(2, 3)
487 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
488 {
489 struct va_format vaf;
490 va_list args;
491
492 va_start(args, fmt);
493 vaf.fmt = fmt;
494 vaf.va = &args;
495
496 if (!rbd_dev)
497 printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
498 else if (rbd_dev->disk)
499 printk(KERN_WARNING "%s: %s: %pV\n",
500 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
501 else if (rbd_dev->spec && rbd_dev->spec->image_name)
502 printk(KERN_WARNING "%s: image %s: %pV\n",
503 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
504 else if (rbd_dev->spec && rbd_dev->spec->image_id)
505 printk(KERN_WARNING "%s: id %s: %pV\n",
506 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
507 else /* punt */
508 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
509 RBD_DRV_NAME, rbd_dev, &vaf);
510 va_end(args);
511 }
512
513 #ifdef RBD_DEBUG
514 #define rbd_assert(expr) \
515 if (unlikely(!(expr))) { \
516 printk(KERN_ERR "\nAssertion failure in %s() " \
517 "at line %d:\n\n" \
518 "\trbd_assert(%s);\n\n", \
519 __func__, __LINE__, #expr); \
520 BUG(); \
521 }
522 #else /* !RBD_DEBUG */
523 # define rbd_assert(expr) ((void) 0)
524 #endif /* !RBD_DEBUG */
525
526 static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
527 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
528 static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
529 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
530
531 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
532 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
533 static int rbd_dev_header_info(struct rbd_device *rbd_dev);
534 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
535 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
536 u64 snap_id);
537 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
538 u8 *order, u64 *snap_size);
539 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
540 u64 *snap_features);
541
542 static int rbd_open(struct block_device *bdev, fmode_t mode)
543 {
544 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
545 bool removing = false;
546
547 if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
548 return -EROFS;
549
550 spin_lock_irq(&rbd_dev->lock);
551 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
552 removing = true;
553 else
554 rbd_dev->open_count++;
555 spin_unlock_irq(&rbd_dev->lock);
556 if (removing)
557 return -ENOENT;
558
559 (void) get_device(&rbd_dev->dev);
560
561 return 0;
562 }
563
564 static void rbd_release(struct gendisk *disk, fmode_t mode)
565 {
566 struct rbd_device *rbd_dev = disk->private_data;
567 unsigned long open_count_before;
568
569 spin_lock_irq(&rbd_dev->lock);
570 open_count_before = rbd_dev->open_count--;
571 spin_unlock_irq(&rbd_dev->lock);
572 rbd_assert(open_count_before > 0);
573
574 put_device(&rbd_dev->dev);
575 }
576
577 static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
578 {
579 int ret = 0;
580 int val;
581 bool ro;
582 bool ro_changed = false;
583
584 /* get_user() may sleep, so call it before taking rbd_dev->lock */
585 if (get_user(val, (int __user *)(arg)))
586 return -EFAULT;
587
588 ro = val ? true : false;
589 	/* Snapshots don't allow writes */
590 if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
591 return -EROFS;
592
593 spin_lock_irq(&rbd_dev->lock);
594 	/* prevent others from opening this device */
595 if (rbd_dev->open_count > 1) {
596 ret = -EBUSY;
597 goto out;
598 }
599
600 if (rbd_dev->mapping.read_only != ro) {
601 rbd_dev->mapping.read_only = ro;
602 ro_changed = true;
603 }
604
605 out:
606 spin_unlock_irq(&rbd_dev->lock);
607 /* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
608 if (ret == 0 && ro_changed)
609 set_disk_ro(rbd_dev->disk, ro ? 1 : 0);
610
611 return ret;
612 }
613
614 static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
615 unsigned int cmd, unsigned long arg)
616 {
617 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
618 int ret = 0;
619
620 switch (cmd) {
621 case BLKROSET:
622 ret = rbd_ioctl_set_ro(rbd_dev, arg);
623 break;
624 default:
625 ret = -ENOTTY;
626 }
627
628 return ret;
629 }
630
631 #ifdef CONFIG_COMPAT
632 static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
633 unsigned int cmd, unsigned long arg)
634 {
635 return rbd_ioctl(bdev, mode, cmd, arg);
636 }
637 #endif /* CONFIG_COMPAT */
638
639 static const struct block_device_operations rbd_bd_ops = {
640 .owner = THIS_MODULE,
641 .open = rbd_open,
642 .release = rbd_release,
643 .ioctl = rbd_ioctl,
644 #ifdef CONFIG_COMPAT
645 .compat_ioctl = rbd_compat_ioctl,
646 #endif
647 };
648
649 /*
650 * Initialize an rbd client instance. Success or not, this function
651 * consumes ceph_opts. Caller holds client_mutex.
652 */
653 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
654 {
655 struct rbd_client *rbdc;
656 int ret = -ENOMEM;
657
658 dout("%s:\n", __func__);
659 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
660 if (!rbdc)
661 goto out_opt;
662
663 kref_init(&rbdc->kref);
664 INIT_LIST_HEAD(&rbdc->node);
665
666 rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
667 if (IS_ERR(rbdc->client))
668 goto out_rbdc;
669 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
670
671 ret = ceph_open_session(rbdc->client);
672 if (ret < 0)
673 goto out_client;
674
675 spin_lock(&rbd_client_list_lock);
676 list_add_tail(&rbdc->node, &rbd_client_list);
677 spin_unlock(&rbd_client_list_lock);
678
679 dout("%s: rbdc %p\n", __func__, rbdc);
680
681 return rbdc;
682 out_client:
683 ceph_destroy_client(rbdc->client);
684 out_rbdc:
685 kfree(rbdc);
686 out_opt:
687 if (ceph_opts)
688 ceph_destroy_options(ceph_opts);
689 dout("%s: error %d\n", __func__, ret);
690
691 return ERR_PTR(ret);
692 }
693
694 static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
695 {
696 kref_get(&rbdc->kref);
697
698 return rbdc;
699 }
700
701 /*
702  * Find a ceph client with a specific addr and configuration. If
703 * found, bump its reference count.
704 */
705 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
706 {
707 struct rbd_client *client_node;
708 bool found = false;
709
710 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
711 return NULL;
712
713 spin_lock(&rbd_client_list_lock);
714 list_for_each_entry(client_node, &rbd_client_list, node) {
715 if (!ceph_compare_options(ceph_opts, client_node->client)) {
716 __rbd_get_client(client_node);
717
718 found = true;
719 break;
720 }
721 }
722 spin_unlock(&rbd_client_list_lock);
723
724 return found ? client_node : NULL;
725 }
726
727 /*
728 * (Per device) rbd map options
729 */
730 enum {
731 Opt_queue_depth,
732 Opt_last_int,
733 /* int args above */
734 Opt_last_string,
735 /* string args above */
736 Opt_read_only,
737 Opt_read_write,
738 Opt_err
739 };
740
741 static match_table_t rbd_opts_tokens = {
742 {Opt_queue_depth, "queue_depth=%d"},
743 /* int args above */
744 /* string args above */
745 {Opt_read_only, "read_only"},
746 {Opt_read_only, "ro"}, /* Alternate spelling */
747 {Opt_read_write, "read_write"},
748 {Opt_read_write, "rw"}, /* Alternate spelling */
749 {Opt_err, NULL}
750 };
751
752 struct rbd_options {
753 int queue_depth;
754 bool read_only;
755 };
756
757 #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
758 #define RBD_READ_ONLY_DEFAULT false
759
760 static int parse_rbd_opts_token(char *c, void *private)
761 {
762 struct rbd_options *rbd_opts = private;
763 substring_t argstr[MAX_OPT_ARGS];
764 int token, intval, ret;
765
766 token = match_token(c, rbd_opts_tokens, argstr);
767 if (token < Opt_last_int) {
768 ret = match_int(&argstr[0], &intval);
769 if (ret < 0) {
770 pr_err("bad mount option arg (not int) at '%s'\n", c);
771 return ret;
772 }
773 dout("got int token %d val %d\n", token, intval);
774 } else if (token > Opt_last_int && token < Opt_last_string) {
775 dout("got string token %d val %s\n", token, argstr[0].from);
776 } else {
777 dout("got token %d\n", token);
778 }
779
780 switch (token) {
781 case Opt_queue_depth:
782 if (intval < 1) {
783 pr_err("queue_depth out of range\n");
784 return -EINVAL;
785 }
786 rbd_opts->queue_depth = intval;
787 break;
788 case Opt_read_only:
789 rbd_opts->read_only = true;
790 break;
791 case Opt_read_write:
792 rbd_opts->read_only = false;
793 break;
794 default:
795 /* libceph prints "bad option" msg */
796 return -EINVAL;
797 }
798
799 return 0;
800 }
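
/*
 * Illustrative sketch (not part of the driver): parse_rbd_opts_token()
 * is called for each rbd-specific token of the comma-separated options
 * string supplied at map time.  For example, an options string of
 *
 *	queue_depth=128,read_only
 *
 * results in two calls: the first matches Opt_queue_depth and sets
 * rbd_opts->queue_depth to 128, the second matches Opt_read_only and
 * sets rbd_opts->read_only to true.
 */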
801
802 static char* obj_op_name(enum obj_operation_type op_type)
803 {
804 switch (op_type) {
805 case OBJ_OP_READ:
806 return "read";
807 case OBJ_OP_WRITE:
808 return "write";
809 case OBJ_OP_DISCARD:
810 return "discard";
811 default:
812 return "???";
813 }
814 }
815
816 /*
817  * Get a ceph client with a specific addr and configuration; if one does
818  * not exist, create it. Either way, ceph_opts is consumed by this
819 * function.
820 */
821 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
822 {
823 struct rbd_client *rbdc;
824
825 mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
826 rbdc = rbd_client_find(ceph_opts);
827 if (rbdc) /* using an existing client */
828 ceph_destroy_options(ceph_opts);
829 else
830 rbdc = rbd_client_create(ceph_opts);
831 mutex_unlock(&client_mutex);
832
833 return rbdc;
834 }
835
836 /*
837 * Destroy ceph client
838 *
839 * Caller must hold rbd_client_list_lock.
840 */
841 static void rbd_client_release(struct kref *kref)
842 {
843 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
844
845 dout("%s: rbdc %p\n", __func__, rbdc);
846 spin_lock(&rbd_client_list_lock);
847 list_del(&rbdc->node);
848 spin_unlock(&rbd_client_list_lock);
849
850 ceph_destroy_client(rbdc->client);
851 kfree(rbdc);
852 }
853
854 /*
855 * Drop reference to ceph client node. If it's not referenced anymore, release
856 * it.
857 */
858 static void rbd_put_client(struct rbd_client *rbdc)
859 {
860 if (rbdc)
861 kref_put(&rbdc->kref, rbd_client_release);
862 }
863
864 static bool rbd_image_format_valid(u32 image_format)
865 {
866 return image_format == 1 || image_format == 2;
867 }
868
869 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
870 {
871 size_t size;
872 u32 snap_count;
873
874 /* The header has to start with the magic rbd header text */
875 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
876 return false;
877
878 /* The bio layer requires at least sector-sized I/O */
879
880 if (ondisk->options.order < SECTOR_SHIFT)
881 return false;
882
883 /* If we use u64 in a few spots we may be able to loosen this */
884
885 if (ondisk->options.order > 8 * sizeof (int) - 1)
886 return false;
887
888 /*
889 * The size of a snapshot header has to fit in a size_t, and
890 * that limits the number of snapshots.
891 */
892 snap_count = le32_to_cpu(ondisk->snap_count);
893 size = SIZE_MAX - sizeof (struct ceph_snap_context);
894 if (snap_count > size / sizeof (__le64))
895 return false;
896
897 /*
898 	 * Not only that, but the size of the entire snapshot
899 * header must also be representable in a size_t.
900 */
901 size -= snap_count * sizeof (__le64);
902 if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
903 return false;
904
905 return true;
906 }
907
908 /*
909 * Fill an rbd image header with information from the given format 1
910 * on-disk header.
911 */
912 static int rbd_header_from_disk(struct rbd_device *rbd_dev,
913 struct rbd_image_header_ondisk *ondisk)
914 {
915 struct rbd_image_header *header = &rbd_dev->header;
916 bool first_time = header->object_prefix == NULL;
917 struct ceph_snap_context *snapc;
918 char *object_prefix = NULL;
919 char *snap_names = NULL;
920 u64 *snap_sizes = NULL;
921 u32 snap_count;
922 size_t size;
923 int ret = -ENOMEM;
924 u32 i;
925
926 /* Allocate this now to avoid having to handle failure below */
927
928 if (first_time) {
929 size_t len;
930
931 len = strnlen(ondisk->object_prefix,
932 sizeof (ondisk->object_prefix));
933 object_prefix = kmalloc(len + 1, GFP_KERNEL);
934 if (!object_prefix)
935 return -ENOMEM;
936 memcpy(object_prefix, ondisk->object_prefix, len);
937 object_prefix[len] = '\0';
938 }
939
940 /* Allocate the snapshot context and fill it in */
941
942 snap_count = le32_to_cpu(ondisk->snap_count);
943 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
944 if (!snapc)
945 goto out_err;
946 snapc->seq = le64_to_cpu(ondisk->snap_seq);
947 if (snap_count) {
948 struct rbd_image_snap_ondisk *snaps;
949 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
950
951 /* We'll keep a copy of the snapshot names... */
952
953 if (snap_names_len > (u64)SIZE_MAX)
954 goto out_2big;
955 snap_names = kmalloc(snap_names_len, GFP_KERNEL);
956 if (!snap_names)
957 goto out_err;
958
959 /* ...as well as the array of their sizes. */
960
961 size = snap_count * sizeof (*header->snap_sizes);
962 snap_sizes = kmalloc(size, GFP_KERNEL);
963 if (!snap_sizes)
964 goto out_err;
965
966 /*
967 * Copy the names, and fill in each snapshot's id
968 * and size.
969 *
970 * Note that rbd_dev_v1_header_info() guarantees the
971 * ondisk buffer we're working with has
972 * snap_names_len bytes beyond the end of the
973 		 * snapshot id array, so this memcpy() is safe.
974 */
975 memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
976 snaps = ondisk->snaps;
977 for (i = 0; i < snap_count; i++) {
978 snapc->snaps[i] = le64_to_cpu(snaps[i].id);
979 snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
980 }
981 }
982
983 /* We won't fail any more, fill in the header */
984
985 if (first_time) {
986 header->object_prefix = object_prefix;
987 header->obj_order = ondisk->options.order;
988 header->crypt_type = ondisk->options.crypt_type;
989 header->comp_type = ondisk->options.comp_type;
990 /* The rest aren't used for format 1 images */
991 header->stripe_unit = 0;
992 header->stripe_count = 0;
993 header->features = 0;
994 } else {
995 ceph_put_snap_context(header->snapc);
996 kfree(header->snap_names);
997 kfree(header->snap_sizes);
998 }
999
1000 /* The remaining fields always get updated (when we refresh) */
1001
1002 header->image_size = le64_to_cpu(ondisk->image_size);
1003 header->snapc = snapc;
1004 header->snap_names = snap_names;
1005 header->snap_sizes = snap_sizes;
1006
1007 return 0;
1008 out_2big:
1009 ret = -EIO;
1010 out_err:
1011 kfree(snap_sizes);
1012 kfree(snap_names);
1013 ceph_put_snap_context(snapc);
1014 kfree(object_prefix);
1015
1016 return ret;
1017 }
1018
1019 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
1020 {
1021 const char *snap_name;
1022
1023 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
1024
1025 /* Skip over names until we find the one we are looking for */
1026
1027 snap_name = rbd_dev->header.snap_names;
1028 while (which--)
1029 snap_name += strlen(snap_name) + 1;
1030
1031 return kstrdup(snap_name, GFP_KERNEL);
1032 }
1033
1034 /*
1035 * Snapshot id comparison function for use with qsort()/bsearch().
1036 * Note that result is for snapshots in *descending* order.
1037 */
1038 static int snapid_compare_reverse(const void *s1, const void *s2)
1039 {
1040 u64 snap_id1 = *(u64 *)s1;
1041 u64 snap_id2 = *(u64 *)s2;
1042
1043 if (snap_id1 < snap_id2)
1044 return 1;
1045 return snap_id1 == snap_id2 ? 0 : -1;
1046 }
1047
1048 /*
1049 * Search a snapshot context to see if the given snapshot id is
1050 * present.
1051 *
1052 * Returns the position of the snapshot id in the array if it's found,
1053 * or BAD_SNAP_INDEX otherwise.
1054 *
1055  * Note: The snapshot array is kept sorted (by the osd) in
1056 * reverse order, highest snapshot id first.
1057 */
1058 static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
1059 {
1060 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
1061 u64 *found;
1062
1063 found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
1064 sizeof (snap_id), snapid_compare_reverse);
1065
1066 return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
1067 }
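
/*
 * Example of the reverse-order search above: if snapc->snaps holds the
 * ids { 12, 7, 3 } (newest first, as the osd keeps them), looking up
 * snap_id 7 yields index 1, while looking up 5 yields BAD_SNAP_INDEX.
 */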
1068
1069 static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
1070 u64 snap_id)
1071 {
1072 u32 which;
1073 const char *snap_name;
1074
1075 which = rbd_dev_snap_index(rbd_dev, snap_id);
1076 if (which == BAD_SNAP_INDEX)
1077 return ERR_PTR(-ENOENT);
1078
1079 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
1080 return snap_name ? snap_name : ERR_PTR(-ENOMEM);
1081 }
1082
1083 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
1084 {
1085 if (snap_id == CEPH_NOSNAP)
1086 return RBD_SNAP_HEAD_NAME;
1087
1088 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1089 if (rbd_dev->image_format == 1)
1090 return rbd_dev_v1_snap_name(rbd_dev, snap_id);
1091
1092 return rbd_dev_v2_snap_name(rbd_dev, snap_id);
1093 }
1094
1095 static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
1096 u64 *snap_size)
1097 {
1098 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1099 if (snap_id == CEPH_NOSNAP) {
1100 *snap_size = rbd_dev->header.image_size;
1101 } else if (rbd_dev->image_format == 1) {
1102 u32 which;
1103
1104 which = rbd_dev_snap_index(rbd_dev, snap_id);
1105 if (which == BAD_SNAP_INDEX)
1106 return -ENOENT;
1107
1108 *snap_size = rbd_dev->header.snap_sizes[which];
1109 } else {
1110 u64 size = 0;
1111 int ret;
1112
1113 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
1114 if (ret)
1115 return ret;
1116
1117 *snap_size = size;
1118 }
1119 return 0;
1120 }
1121
1122 static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
1123 u64 *snap_features)
1124 {
1125 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1126 if (snap_id == CEPH_NOSNAP) {
1127 *snap_features = rbd_dev->header.features;
1128 } else if (rbd_dev->image_format == 1) {
1129 *snap_features = 0; /* No features for format 1 */
1130 } else {
1131 u64 features = 0;
1132 int ret;
1133
1134 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
1135 if (ret)
1136 return ret;
1137
1138 *snap_features = features;
1139 }
1140 return 0;
1141 }
1142
1143 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
1144 {
1145 u64 snap_id = rbd_dev->spec->snap_id;
1146 u64 size = 0;
1147 u64 features = 0;
1148 int ret;
1149
1150 ret = rbd_snap_size(rbd_dev, snap_id, &size);
1151 if (ret)
1152 return ret;
1153 ret = rbd_snap_features(rbd_dev, snap_id, &features);
1154 if (ret)
1155 return ret;
1156
1157 rbd_dev->mapping.size = size;
1158 rbd_dev->mapping.features = features;
1159
1160 return 0;
1161 }
1162
1163 static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1164 {
1165 rbd_dev->mapping.size = 0;
1166 rbd_dev->mapping.features = 0;
1167 }
1168
1169 static void rbd_segment_name_free(const char *name)
1170 {
1171 /* The explicit cast here is needed to drop the const qualifier */
1172
1173 kmem_cache_free(rbd_segment_name_cache, (void *)name);
1174 }
1175
1176 static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
1177 {
1178 char *name;
1179 u64 segment;
1180 int ret;
1181 char *name_format;
1182
1183 name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
1184 if (!name)
1185 return NULL;
1186 segment = offset >> rbd_dev->header.obj_order;
1187 name_format = "%s.%012llx";
1188 if (rbd_dev->image_format == 2)
1189 name_format = "%s.%016llx";
1190 ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
1191 rbd_dev->header.object_prefix, segment);
1192 if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
1193 pr_err("error formatting segment name for #%llu (%d)\n",
1194 segment, ret);
1195 rbd_segment_name_free(name);
1196 name = NULL;
1197 }
1198
1199 return name;
1200 }
1201
1202 static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
1203 {
1204 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1205
1206 return offset & (segment_size - 1);
1207 }
1208
1209 static u64 rbd_segment_length(struct rbd_device *rbd_dev,
1210 u64 offset, u64 length)
1211 {
1212 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1213
1214 offset &= segment_size - 1;
1215
1216 rbd_assert(length <= U64_MAX - offset);
1217 if (offset + length > segment_size)
1218 length = segment_size - offset;
1219
1220 return length;
1221 }
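
/*
 * Worked example for the three helpers above, assuming the default
 * object order of 22 (4 MiB objects) and a hypothetical format 2
 * object prefix of "rbd_data.1017":
 *
 *	image offset 0x1234567
 *	  rbd_segment_name()   -> "rbd_data.1017.0000000000000004"
 *	  rbd_segment_offset() -> 0x234567
 *	  rbd_segment_length() -> clipped so the I/O never crosses the
 *				  end of object 4
 */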
1222
1223 /*
1224 * returns the size of an object in the image
1225 */
1226 static u64 rbd_obj_bytes(struct rbd_image_header *header)
1227 {
1228 return 1 << header->obj_order;
1229 }
1230
1231 /*
1232 * bio helpers
1233 */
1234
1235 static void bio_chain_put(struct bio *chain)
1236 {
1237 struct bio *tmp;
1238
1239 while (chain) {
1240 tmp = chain;
1241 chain = chain->bi_next;
1242 bio_put(tmp);
1243 }
1244 }
1245
1246 /*
1247 * zeros a bio chain, starting at specific offset
1248 */
1249 static void zero_bio_chain(struct bio *chain, int start_ofs)
1250 {
1251 struct bio_vec bv;
1252 struct bvec_iter iter;
1253 unsigned long flags;
1254 void *buf;
1255 int pos = 0;
1256
1257 while (chain) {
1258 bio_for_each_segment(bv, chain, iter) {
1259 if (pos + bv.bv_len > start_ofs) {
1260 int remainder = max(start_ofs - pos, 0);
1261 buf = bvec_kmap_irq(&bv, &flags);
1262 memset(buf + remainder, 0,
1263 bv.bv_len - remainder);
1264 flush_dcache_page(bv.bv_page);
1265 bvec_kunmap_irq(buf, &flags);
1266 }
1267 pos += bv.bv_len;
1268 }
1269
1270 chain = chain->bi_next;
1271 }
1272 }
1273
1274 /*
1275 * similar to zero_bio_chain(), zeros data defined by a page array,
1276 * starting at the given byte offset from the start of the array and
1277 * continuing up to the given end offset. The pages array is
1278 * assumed to be big enough to hold all bytes up to the end.
1279 */
1280 static void zero_pages(struct page **pages, u64 offset, u64 end)
1281 {
1282 struct page **page = &pages[offset >> PAGE_SHIFT];
1283
1284 rbd_assert(end > offset);
1285 rbd_assert(end - offset <= (u64)SIZE_MAX);
1286 while (offset < end) {
1287 size_t page_offset;
1288 size_t length;
1289 unsigned long flags;
1290 void *kaddr;
1291
1292 page_offset = offset & ~PAGE_MASK;
1293 length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
1294 local_irq_save(flags);
1295 kaddr = kmap_atomic(*page);
1296 memset(kaddr + page_offset, 0, length);
1297 flush_dcache_page(*page);
1298 kunmap_atomic(kaddr);
1299 local_irq_restore(flags);
1300
1301 offset += length;
1302 page++;
1303 }
1304 }
1305
1306 /*
1307 * Clone a portion of a bio, starting at the given byte offset
1308 * and continuing for the number of bytes indicated.
1309 */
1310 static struct bio *bio_clone_range(struct bio *bio_src,
1311 unsigned int offset,
1312 unsigned int len,
1313 gfp_t gfpmask)
1314 {
1315 struct bio *bio;
1316
1317 bio = bio_clone(bio_src, gfpmask);
1318 if (!bio)
1319 return NULL; /* ENOMEM */
1320
1321 bio_advance(bio, offset);
1322 bio->bi_iter.bi_size = len;
1323
1324 return bio;
1325 }
1326
1327 /*
1328 * Clone a portion of a bio chain, starting at the given byte offset
1329 * into the first bio in the source chain and continuing for the
1330 * number of bytes indicated. The result is another bio chain of
1331 * exactly the given length, or a null pointer on error.
1332 *
1333 * The bio_src and offset parameters are both in-out. On entry they
1334 * refer to the first source bio and the offset into that bio where
1335 * the start of data to be cloned is located.
1336 *
1337 * On return, bio_src is updated to refer to the bio in the source
1338  * chain that contains the first un-cloned byte, and *offset will
1339 * contain the offset of that byte within that bio.
1340 */
1341 static struct bio *bio_chain_clone_range(struct bio **bio_src,
1342 unsigned int *offset,
1343 unsigned int len,
1344 gfp_t gfpmask)
1345 {
1346 struct bio *bi = *bio_src;
1347 unsigned int off = *offset;
1348 struct bio *chain = NULL;
1349 struct bio **end;
1350
1351 /* Build up a chain of clone bios up to the limit */
1352
1353 if (!bi || off >= bi->bi_iter.bi_size || !len)
1354 return NULL; /* Nothing to clone */
1355
1356 end = &chain;
1357 while (len) {
1358 unsigned int bi_size;
1359 struct bio *bio;
1360
1361 if (!bi) {
1362 rbd_warn(NULL, "bio_chain exhausted with %u left", len);
1363 goto out_err; /* EINVAL; ran out of bio's */
1364 }
1365 bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
1366 bio = bio_clone_range(bi, off, bi_size, gfpmask);
1367 if (!bio)
1368 goto out_err; /* ENOMEM */
1369
1370 *end = bio;
1371 end = &bio->bi_next;
1372
1373 off += bi_size;
1374 if (off == bi->bi_iter.bi_size) {
1375 bi = bi->bi_next;
1376 off = 0;
1377 }
1378 len -= bi_size;
1379 }
1380 *bio_src = bi;
1381 *offset = off;
1382
1383 return chain;
1384 out_err:
1385 bio_chain_put(chain);
1386
1387 return NULL;
1388 }
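
/*
 * Illustrative sketch (not part of the driver): because *bio_src and
 * *offset are in-out parameters, a caller splitting one request into
 * per-object pieces can call this in a loop.  Here remaining, seg and
 * rq_bio are hypothetical values:
 *
 *	struct bio *bio = rq_bio;
 *	unsigned int off = 0;
 *
 *	while (remaining) {
 *		struct bio *clone;
 *
 *		clone = bio_chain_clone_range(&bio, &off, seg, GFP_NOIO);
 *		if (!clone)
 *			break;		-- ENOMEM or ran out of bios
 *		remaining -= seg;	-- bio/off now point at the first
 *					-- un-cloned byte
 *	}
 */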
1389
1390 /*
1391 * The default/initial value for all object request flags is 0. For
1392 * each flag, once its value is set to 1 it is never reset to 0
1393 * again.
1394 */
1395 static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
1396 {
1397 if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
1398 struct rbd_device *rbd_dev;
1399
1400 rbd_dev = obj_request->img_request->rbd_dev;
1401 rbd_warn(rbd_dev, "obj_request %p already marked img_data",
1402 obj_request);
1403 }
1404 }
1405
1406 static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
1407 {
1408 smp_mb();
1409 return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
1410 }
1411
1412 static void obj_request_done_set(struct rbd_obj_request *obj_request)
1413 {
1414 if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
1415 struct rbd_device *rbd_dev = NULL;
1416
1417 if (obj_request_img_data_test(obj_request))
1418 rbd_dev = obj_request->img_request->rbd_dev;
1419 rbd_warn(rbd_dev, "obj_request %p already marked done",
1420 obj_request);
1421 }
1422 }
1423
1424 static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1425 {
1426 smp_mb();
1427 return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
1428 }
1429
1430 /*
1431 * This sets the KNOWN flag after (possibly) setting the EXISTS
1432 * flag. The latter is set based on the "exists" value provided.
1433 *
1434 * Note that for our purposes once an object exists it never goes
1435  * away again.  It's possible that the responses from two existence
1436 * checks are separated by the creation of the target object, and
1437 * the first ("doesn't exist") response arrives *after* the second
1438 * ("does exist"). In that case we ignore the second one.
1439 */
1440 static void obj_request_existence_set(struct rbd_obj_request *obj_request,
1441 bool exists)
1442 {
1443 if (exists)
1444 set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
1445 set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
1446 smp_mb();
1447 }
1448
1449 static bool obj_request_known_test(struct rbd_obj_request *obj_request)
1450 {
1451 smp_mb();
1452 return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
1453 }
1454
1455 static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
1456 {
1457 smp_mb();
1458 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
1459 }
1460
1461 static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
1462 {
1463 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1464
1465 return obj_request->img_offset <
1466 round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
1467 }
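
/*
 * Worked example of the rounding above: with 4 MiB objects and a
 * parent_overlap of 5 MiB, the overlap rounds up to 8 MiB, so an object
 * request whose img_offset is below 8 MiB is treated as possibly
 * overlapping the parent and may need a parent read or copyup.
 */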
1468
1469 static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1470 {
1471 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1472 atomic_read(&obj_request->kref.refcount));
1473 kref_get(&obj_request->kref);
1474 }
1475
1476 static void rbd_obj_request_destroy(struct kref *kref);
1477 static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1478 {
1479 rbd_assert(obj_request != NULL);
1480 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1481 atomic_read(&obj_request->kref.refcount));
1482 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1483 }
1484
1485 static void rbd_img_request_get(struct rbd_img_request *img_request)
1486 {
1487 dout("%s: img %p (was %d)\n", __func__, img_request,
1488 atomic_read(&img_request->kref.refcount));
1489 kref_get(&img_request->kref);
1490 }
1491
1492 static bool img_request_child_test(struct rbd_img_request *img_request);
1493 static void rbd_parent_request_destroy(struct kref *kref);
1494 static void rbd_img_request_destroy(struct kref *kref);
1495 static void rbd_img_request_put(struct rbd_img_request *img_request)
1496 {
1497 rbd_assert(img_request != NULL);
1498 dout("%s: img %p (was %d)\n", __func__, img_request,
1499 atomic_read(&img_request->kref.refcount));
1500 if (img_request_child_test(img_request))
1501 kref_put(&img_request->kref, rbd_parent_request_destroy);
1502 else
1503 kref_put(&img_request->kref, rbd_img_request_destroy);
1504 }
1505
1506 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1507 struct rbd_obj_request *obj_request)
1508 {
1509 rbd_assert(obj_request->img_request == NULL);
1510
1511 /* Image request now owns object's original reference */
1512 obj_request->img_request = img_request;
1513 obj_request->which = img_request->obj_request_count;
1514 rbd_assert(!obj_request_img_data_test(obj_request));
1515 obj_request_img_data_set(obj_request);
1516 rbd_assert(obj_request->which != BAD_WHICH);
1517 img_request->obj_request_count++;
1518 list_add_tail(&obj_request->links, &img_request->obj_requests);
1519 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1520 obj_request->which);
1521 }
1522
1523 static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1524 struct rbd_obj_request *obj_request)
1525 {
1526 rbd_assert(obj_request->which != BAD_WHICH);
1527
1528 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1529 obj_request->which);
1530 list_del(&obj_request->links);
1531 rbd_assert(img_request->obj_request_count > 0);
1532 img_request->obj_request_count--;
1533 rbd_assert(obj_request->which == img_request->obj_request_count);
1534 obj_request->which = BAD_WHICH;
1535 rbd_assert(obj_request_img_data_test(obj_request));
1536 rbd_assert(obj_request->img_request == img_request);
1537 obj_request->img_request = NULL;
1538 obj_request->callback = NULL;
1539 rbd_obj_request_put(obj_request);
1540 }
1541
1542 static bool obj_request_type_valid(enum obj_request_type type)
1543 {
1544 switch (type) {
1545 case OBJ_REQUEST_NODATA:
1546 case OBJ_REQUEST_BIO:
1547 case OBJ_REQUEST_PAGES:
1548 return true;
1549 default:
1550 return false;
1551 }
1552 }
1553
1554 static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1555 struct rbd_obj_request *obj_request)
1556 {
1557 dout("%s %p\n", __func__, obj_request);
1558 return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1559 }
1560
1561 static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
1562 {
1563 dout("%s %p\n", __func__, obj_request);
1564 ceph_osdc_cancel_request(obj_request->osd_req);
1565 }
1566
1567 /*
1568 * Wait for an object request to complete. If interrupted, cancel the
1569 * underlying osd request.
1570 *
1571 * @timeout: in jiffies, 0 means "wait forever"
1572 */
1573 static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
1574 unsigned long timeout)
1575 {
1576 long ret;
1577
1578 dout("%s %p\n", __func__, obj_request);
1579 ret = wait_for_completion_interruptible_timeout(
1580 &obj_request->completion,
1581 ceph_timeout_jiffies(timeout));
1582 if (ret <= 0) {
1583 if (ret == 0)
1584 ret = -ETIMEDOUT;
1585 rbd_obj_request_end(obj_request);
1586 } else {
1587 ret = 0;
1588 }
1589
1590 dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
1591 return ret;
1592 }
1593
1594 static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1595 {
1596 return __rbd_obj_request_wait(obj_request, 0);
1597 }
1598
1599 static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request,
1600 unsigned long timeout)
1601 {
1602 return __rbd_obj_request_wait(obj_request, timeout);
1603 }
1604
1605 static void rbd_img_request_complete(struct rbd_img_request *img_request)
1606 {
1607
1608 dout("%s: img %p\n", __func__, img_request);
1609
1610 /*
1611 * If no error occurred, compute the aggregate transfer
1612 * count for the image request. We could instead use
1613 * atomic64_cmpxchg() to update it as each object request
1614 	 * completes; it is not clear offhand which way is better.
1615 */
1616 if (!img_request->result) {
1617 struct rbd_obj_request *obj_request;
1618 u64 xferred = 0;
1619
1620 for_each_obj_request(img_request, obj_request)
1621 xferred += obj_request->xferred;
1622 img_request->xferred = xferred;
1623 }
1624
1625 if (img_request->callback)
1626 img_request->callback(img_request);
1627 else
1628 rbd_img_request_put(img_request);
1629 }
1630
1631 /*
1632 * The default/initial value for all image request flags is 0. Each
1633 * is conditionally set to 1 at image request initialization time
1634  * and currently never changes thereafter.
1635 */
1636 static void img_request_write_set(struct rbd_img_request *img_request)
1637 {
1638 set_bit(IMG_REQ_WRITE, &img_request->flags);
1639 smp_mb();
1640 }
1641
1642 static bool img_request_write_test(struct rbd_img_request *img_request)
1643 {
1644 smp_mb();
1645 return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
1646 }
1647
1648 /*
1649  * Set the discard flag when the img_request is a discard request
1650 */
1651 static void img_request_discard_set(struct rbd_img_request *img_request)
1652 {
1653 set_bit(IMG_REQ_DISCARD, &img_request->flags);
1654 smp_mb();
1655 }
1656
1657 static bool img_request_discard_test(struct rbd_img_request *img_request)
1658 {
1659 smp_mb();
1660 return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
1661 }
1662
1663 static void img_request_child_set(struct rbd_img_request *img_request)
1664 {
1665 set_bit(IMG_REQ_CHILD, &img_request->flags);
1666 smp_mb();
1667 }
1668
1669 static void img_request_child_clear(struct rbd_img_request *img_request)
1670 {
1671 clear_bit(IMG_REQ_CHILD, &img_request->flags);
1672 smp_mb();
1673 }
1674
1675 static bool img_request_child_test(struct rbd_img_request *img_request)
1676 {
1677 smp_mb();
1678 return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
1679 }
1680
1681 static void img_request_layered_set(struct rbd_img_request *img_request)
1682 {
1683 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1684 smp_mb();
1685 }
1686
1687 static void img_request_layered_clear(struct rbd_img_request *img_request)
1688 {
1689 clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1690 smp_mb();
1691 }
1692
1693 static bool img_request_layered_test(struct rbd_img_request *img_request)
1694 {
1695 smp_mb();
1696 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1697 }
1698
1699 static enum obj_operation_type
1700 rbd_img_request_op_type(struct rbd_img_request *img_request)
1701 {
1702 if (img_request_write_test(img_request))
1703 return OBJ_OP_WRITE;
1704 else if (img_request_discard_test(img_request))
1705 return OBJ_OP_DISCARD;
1706 else
1707 return OBJ_OP_READ;
1708 }
1709
1710 static void
1711 rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1712 {
1713 u64 xferred = obj_request->xferred;
1714 u64 length = obj_request->length;
1715
1716 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1717 obj_request, obj_request->img_request, obj_request->result,
1718 xferred, length);
1719 /*
1720 * ENOENT means a hole in the image. We zero-fill the entire
1721 * length of the request. A short read also implies zero-fill
1722 * to the end of the request. An error requires the whole
1723 * length of the request to be reported finished with an error
1724 * to the block layer. In each case we update the xferred
1725 * count to indicate the whole request was satisfied.
1726 */
1727 rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
1728 if (obj_request->result == -ENOENT) {
1729 if (obj_request->type == OBJ_REQUEST_BIO)
1730 zero_bio_chain(obj_request->bio_list, 0);
1731 else
1732 zero_pages(obj_request->pages, 0, length);
1733 obj_request->result = 0;
1734 } else if (xferred < length && !obj_request->result) {
1735 if (obj_request->type == OBJ_REQUEST_BIO)
1736 zero_bio_chain(obj_request->bio_list, xferred);
1737 else
1738 zero_pages(obj_request->pages, xferred, length);
1739 }
1740 obj_request->xferred = length;
1741 obj_request_done_set(obj_request);
1742 }
1743
1744 static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1745 {
1746 dout("%s: obj %p cb %p\n", __func__, obj_request,
1747 obj_request->callback);
1748 if (obj_request->callback)
1749 obj_request->callback(obj_request);
1750 else
1751 complete_all(&obj_request->completion);
1752 }
1753
1754 static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
1755 {
1756 dout("%s: obj %p\n", __func__, obj_request);
1757 obj_request_done_set(obj_request);
1758 }
1759
1760 static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
1761 {
1762 struct rbd_img_request *img_request = NULL;
1763 struct rbd_device *rbd_dev = NULL;
1764 bool layered = false;
1765
1766 if (obj_request_img_data_test(obj_request)) {
1767 img_request = obj_request->img_request;
1768 layered = img_request && img_request_layered_test(img_request);
1769 rbd_dev = img_request->rbd_dev;
1770 }
1771
1772 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1773 obj_request, img_request, obj_request->result,
1774 obj_request->xferred, obj_request->length);
1775 if (layered && obj_request->result == -ENOENT &&
1776 obj_request->img_offset < rbd_dev->parent_overlap)
1777 rbd_img_parent_read(obj_request);
1778 else if (img_request)
1779 rbd_img_obj_request_read_callback(obj_request);
1780 else
1781 obj_request_done_set(obj_request);
1782 }
1783
1784 static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
1785 {
1786 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1787 obj_request->result, obj_request->length);
1788 /*
1789 * There is no such thing as a successful short write. Set
1790 * it to our originally-requested length.
1791 */
1792 obj_request->xferred = obj_request->length;
1793 obj_request_done_set(obj_request);
1794 }
1795
1796 static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
1797 {
1798 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1799 obj_request->result, obj_request->length);
1800 /*
1801 * There is no such thing as a successful short discard. Set
1802 * it to our originally-requested length.
1803 */
1804 obj_request->xferred = obj_request->length;
1805 /* discarding a non-existent object is not a problem */
1806 if (obj_request->result == -ENOENT)
1807 obj_request->result = 0;
1808 obj_request_done_set(obj_request);
1809 }
1810
1811 /*
1812 * For a simple stat call there's nothing to do. We'll do more if
1813 * this is part of a write sequence for a layered image.
1814 */
1815 static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
1816 {
1817 dout("%s: obj %p\n", __func__, obj_request);
1818 obj_request_done_set(obj_request);
1819 }
1820
1821 static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
1822 {
1823 dout("%s: obj %p\n", __func__, obj_request);
1824
1825 if (obj_request_img_data_test(obj_request))
1826 rbd_osd_copyup_callback(obj_request);
1827 else
1828 obj_request_done_set(obj_request);
1829 }
1830
1831 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
1832 {
1833 struct rbd_obj_request *obj_request = osd_req->r_priv;
1834 u16 opcode;
1835
1836 dout("%s: osd_req %p\n", __func__, osd_req);
1837 rbd_assert(osd_req == obj_request->osd_req);
1838 if (obj_request_img_data_test(obj_request)) {
1839 rbd_assert(obj_request->img_request);
1840 rbd_assert(obj_request->which != BAD_WHICH);
1841 } else {
1842 rbd_assert(obj_request->which == BAD_WHICH);
1843 }
1844
1845 if (osd_req->r_result < 0)
1846 obj_request->result = osd_req->r_result;
1847
1848 /*
1849 * We support a 64-bit length, but ultimately it has to be
1850 * passed to the block layer, which just supports a 32-bit
1851 * length field.
1852 */
1853 obj_request->xferred = osd_req->r_ops[0].outdata_len;
1854 rbd_assert(obj_request->xferred < (u64)UINT_MAX);
1855
1856 opcode = osd_req->r_ops[0].op;
1857 switch (opcode) {
1858 case CEPH_OSD_OP_READ:
1859 rbd_osd_read_callback(obj_request);
1860 break;
1861 case CEPH_OSD_OP_SETALLOCHINT:
1862 rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
1863 osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
1864 /* fall through */
1865 case CEPH_OSD_OP_WRITE:
1866 case CEPH_OSD_OP_WRITEFULL:
1867 rbd_osd_write_callback(obj_request);
1868 break;
1869 case CEPH_OSD_OP_STAT:
1870 rbd_osd_stat_callback(obj_request);
1871 break;
1872 case CEPH_OSD_OP_DELETE:
1873 case CEPH_OSD_OP_TRUNCATE:
1874 case CEPH_OSD_OP_ZERO:
1875 rbd_osd_discard_callback(obj_request);
1876 break;
1877 case CEPH_OSD_OP_CALL:
1878 rbd_osd_call_callback(obj_request);
1879 break;
1880 case CEPH_OSD_OP_NOTIFY_ACK:
1881 case CEPH_OSD_OP_WATCH:
1882 rbd_osd_trivial_callback(obj_request);
1883 break;
1884 default:
1885 rbd_warn(NULL, "%s: unsupported op %hu",
1886 obj_request->object_name, (unsigned short) opcode);
1887 break;
1888 }
1889
1890 if (obj_request_done_test(obj_request))
1891 rbd_obj_request_complete(obj_request);
1892 }
1893
1894 static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
1895 {
1896 struct rbd_img_request *img_request = obj_request->img_request;
1897 struct ceph_osd_request *osd_req = obj_request->osd_req;
1898
1899 if (img_request)
1900 osd_req->r_snapid = img_request->snap_id;
1901 }
1902
1903 static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1904 {
1905 struct ceph_osd_request *osd_req = obj_request->osd_req;
1906
1907 osd_req->r_mtime = CURRENT_TIME;
1908 osd_req->r_data_offset = obj_request->offset;
1909 }
1910
1911 /*
1912 * Create an osd request. A read request has one osd op (read).
1913 * A write request has either one (watch) or two (hint+write) osd ops.
1914 * (All rbd data writes are prefixed with an allocation hint op, but
1915 * technically osd watch is a write request, hence this distinction.)
1916 */
1917 static struct ceph_osd_request *rbd_osd_req_create(
1918 struct rbd_device *rbd_dev,
1919 enum obj_operation_type op_type,
1920 unsigned int num_ops,
1921 struct rbd_obj_request *obj_request)
1922 {
1923 struct ceph_snap_context *snapc = NULL;
1924 struct ceph_osd_client *osdc;
1925 struct ceph_osd_request *osd_req;
1926
1927 if (obj_request_img_data_test(obj_request) &&
1928 (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
1929 struct rbd_img_request *img_request = obj_request->img_request;
1930 if (op_type == OBJ_OP_WRITE) {
1931 rbd_assert(img_request_write_test(img_request));
1932 } else {
1933 rbd_assert(img_request_discard_test(img_request));
1934 }
1935 snapc = img_request->snapc;
1936 }
1937
1938 rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));
1939
1940 /* Allocate and initialize the request, for the num_ops ops */
1941
1942 osdc = &rbd_dev->rbd_client->client->osdc;
1943 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
1944 GFP_NOIO);
1945 if (!osd_req)
1946 goto fail;
1947
1948 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
1949 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1950 else
1951 osd_req->r_flags = CEPH_OSD_FLAG_READ;
1952
1953 osd_req->r_callback = rbd_osd_req_callback;
1954 osd_req->r_priv = obj_request;
1955
1956 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
1957 if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
1958 obj_request->object_name))
1959 goto fail;
1960
1961 if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
1962 goto fail;
1963
1964 return osd_req;
1965
1966 fail:
1967 ceph_osdc_put_request(osd_req);
1968 return NULL;
1969 }
1970
1971 /*
1972 * Create a copyup osd request based on the information in the object
1973 * request supplied. A copyup request has two or three osd ops, a
1974 * copyup method call, potentially a hint op, and a write or truncate
1975 * or zero op.
1976 */
1977 static struct ceph_osd_request *
1978 rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1979 {
1980 struct rbd_img_request *img_request;
1981 struct ceph_snap_context *snapc;
1982 struct rbd_device *rbd_dev;
1983 struct ceph_osd_client *osdc;
1984 struct ceph_osd_request *osd_req;
1985 int num_osd_ops = 3;
1986
1987 rbd_assert(obj_request_img_data_test(obj_request));
1988 img_request = obj_request->img_request;
1989 rbd_assert(img_request);
1990 rbd_assert(img_request_write_test(img_request) ||
1991 img_request_discard_test(img_request));
1992
1993 if (img_request_discard_test(img_request))
1994 num_osd_ops = 2;
1995
1996 /* Allocate and initialize the request, for all the ops */
1997
1998 snapc = img_request->snapc;
1999 rbd_dev = img_request->rbd_dev;
2000 osdc = &rbd_dev->rbd_client->client->osdc;
2001 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
2002 false, GFP_NOIO);
2003 if (!osd_req)
2004 goto fail;
2005
2006 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
2007 osd_req->r_callback = rbd_osd_req_callback;
2008 osd_req->r_priv = obj_request;
2009
2010 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
2011 if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
2012 obj_request->object_name))
2013 goto fail;
2014
2015 if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
2016 goto fail;
2017
2018 return osd_req;
2019
2020 fail:
2021 ceph_osdc_put_request(osd_req);
2022 return NULL;
2023 }
2024
2025
2026 static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
2027 {
2028 ceph_osdc_put_request(osd_req);
2029 }
2030
2031 /* object_name is assumed to be a non-null pointer and NUL-terminated */
2032
2033 static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
2034 u64 offset, u64 length,
2035 enum obj_request_type type)
2036 {
2037 struct rbd_obj_request *obj_request;
2038 size_t size;
2039 char *name;
2040
2041 rbd_assert(obj_request_type_valid(type));
2042
2043 size = strlen(object_name) + 1;
2044 name = kmalloc(size, GFP_NOIO);
2045 if (!name)
2046 return NULL;
2047
2048 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
2049 if (!obj_request) {
2050 kfree(name);
2051 return NULL;
2052 }
2053
2054 obj_request->object_name = memcpy(name, object_name, size);
2055 obj_request->offset = offset;
2056 obj_request->length = length;
2057 obj_request->flags = 0;
2058 obj_request->which = BAD_WHICH;
2059 obj_request->type = type;
2060 INIT_LIST_HEAD(&obj_request->links);
2061 init_completion(&obj_request->completion);
2062 kref_init(&obj_request->kref);
2063
2064 dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
2065 offset, length, (int)type, obj_request);
2066
2067 return obj_request;
2068 }
2069
2070 static void rbd_obj_request_destroy(struct kref *kref)
2071 {
2072 struct rbd_obj_request *obj_request;
2073
2074 obj_request = container_of(kref, struct rbd_obj_request, kref);
2075
2076 dout("%s: obj %p\n", __func__, obj_request);
2077
2078 rbd_assert(obj_request->img_request == NULL);
2079 rbd_assert(obj_request->which == BAD_WHICH);
2080
2081 if (obj_request->osd_req)
2082 rbd_osd_req_destroy(obj_request->osd_req);
2083
2084 rbd_assert(obj_request_type_valid(obj_request->type));
2085 switch (obj_request->type) {
2086 case OBJ_REQUEST_NODATA:
2087 break; /* Nothing to do */
2088 case OBJ_REQUEST_BIO:
2089 if (obj_request->bio_list)
2090 bio_chain_put(obj_request->bio_list);
2091 break;
2092 case OBJ_REQUEST_PAGES:
2093 if (obj_request->pages)
2094 ceph_release_page_vector(obj_request->pages,
2095 obj_request->page_count);
2096 break;
2097 }
2098
2099 kfree(obj_request->object_name);
2100 obj_request->object_name = NULL;
2101 kmem_cache_free(rbd_obj_request_cache, obj_request);
2102 }
2103
2104 /* It's OK to call this for a device with no parent */
2105
2106 static void rbd_spec_put(struct rbd_spec *spec);
2107 static void rbd_dev_unparent(struct rbd_device *rbd_dev)
2108 {
2109 rbd_dev_remove_parent(rbd_dev);
2110 rbd_spec_put(rbd_dev->parent_spec);
2111 rbd_dev->parent_spec = NULL;
2112 rbd_dev->parent_overlap = 0;
2113 }
2114
2115 /*
2116 * Parent image reference counting is used to determine when an
2117 * image's parent fields can be safely torn down--after there are no
2118 * more in-flight requests to the parent image. When the last
2119 * reference is dropped, cleaning them up is safe.
2120 */
2121 static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
2122 {
2123 int counter;
2124
2125 if (!rbd_dev->parent_spec)
2126 return;
2127
2128 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
2129 if (counter > 0)
2130 return;
2131
2132 /* Last reference; clean up parent data structures */
2133
2134 if (!counter)
2135 rbd_dev_unparent(rbd_dev);
2136 else
2137 rbd_warn(rbd_dev, "parent reference underflow");
2138 }
2139
2140 /*
2141 * If an image has a non-zero parent overlap, get a reference to its
2142 * parent.
2143 *
2144 * Returns true if the rbd device has a parent with a non-zero
2145 * overlap and a reference for it was successfully taken, or
2146 * false otherwise.
2147 */
2148 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
2149 {
2150 int counter = 0;
2151
2152 if (!rbd_dev->parent_spec)
2153 return false;
2154
2155 down_read(&rbd_dev->header_rwsem);
2156 if (rbd_dev->parent_overlap)
2157 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
2158 up_read(&rbd_dev->header_rwsem);
2159
2160 if (counter < 0)
2161 rbd_warn(rbd_dev, "parent reference overflow");
2162
2163 return counter > 0;
2164 }
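/*
 * How these pair up (sketch, based on the code below): rbd_img_request_create()
 * takes the parent reference via rbd_dev_parent_get() and sets the layered
 * flag on success; rbd_img_request_destroy() drops that reference through
 * rbd_dev_parent_put() when the flag is set.
 */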
2165
2166 /*
2167 * Caller is responsible for filling in the list of object requests
2168 * that comprises the image request, and the Linux request pointer
2169 * (if there is one).
2170 */
2171 static struct rbd_img_request *rbd_img_request_create(
2172 struct rbd_device *rbd_dev,
2173 u64 offset, u64 length,
2174 enum obj_operation_type op_type,
2175 struct ceph_snap_context *snapc)
2176 {
2177 struct rbd_img_request *img_request;
2178
2179 img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
2180 if (!img_request)
2181 return NULL;
2182
2183 img_request->rq = NULL;
2184 img_request->rbd_dev = rbd_dev;
2185 img_request->offset = offset;
2186 img_request->length = length;
2187 img_request->flags = 0;
2188 if (op_type == OBJ_OP_DISCARD) {
2189 img_request_discard_set(img_request);
2190 img_request->snapc = snapc;
2191 } else if (op_type == OBJ_OP_WRITE) {
2192 img_request_write_set(img_request);
2193 img_request->snapc = snapc;
2194 } else {
2195 img_request->snap_id = rbd_dev->spec->snap_id;
2196 }
2197 if (rbd_dev_parent_get(rbd_dev))
2198 img_request_layered_set(img_request);
2199 spin_lock_init(&img_request->completion_lock);
2200 img_request->next_completion = 0;
2201 img_request->callback = NULL;
2202 img_request->result = 0;
2203 img_request->obj_request_count = 0;
2204 INIT_LIST_HEAD(&img_request->obj_requests);
2205 kref_init(&img_request->kref);
2206
2207 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
2208 obj_op_name(op_type), offset, length, img_request);
2209
2210 return img_request;
2211 }
2212
2213 static void rbd_img_request_destroy(struct kref *kref)
2214 {
2215 struct rbd_img_request *img_request;
2216 struct rbd_obj_request *obj_request;
2217 struct rbd_obj_request *next_obj_request;
2218
2219 img_request = container_of(kref, struct rbd_img_request, kref);
2220
2221 dout("%s: img %p\n", __func__, img_request);
2222
2223 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2224 rbd_img_obj_request_del(img_request, obj_request);
2225 rbd_assert(img_request->obj_request_count == 0);
2226
2227 if (img_request_layered_test(img_request)) {
2228 img_request_layered_clear(img_request);
2229 rbd_dev_parent_put(img_request->rbd_dev);
2230 }
2231
2232 if (img_request_write_test(img_request) ||
2233 img_request_discard_test(img_request))
2234 ceph_put_snap_context(img_request->snapc);
2235
2236 kmem_cache_free(rbd_img_request_cache, img_request);
2237 }
2238
2239 static struct rbd_img_request *rbd_parent_request_create(
2240 struct rbd_obj_request *obj_request,
2241 u64 img_offset, u64 length)
2242 {
2243 struct rbd_img_request *parent_request;
2244 struct rbd_device *rbd_dev;
2245
2246 rbd_assert(obj_request->img_request);
2247 rbd_dev = obj_request->img_request->rbd_dev;
2248
2249 parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
2250 length, OBJ_OP_READ, NULL);
2251 if (!parent_request)
2252 return NULL;
2253
2254 img_request_child_set(parent_request);
2255 rbd_obj_request_get(obj_request);
2256 parent_request->obj_request = obj_request;
2257
2258 return parent_request;
2259 }
2260
2261 static void rbd_parent_request_destroy(struct kref *kref)
2262 {
2263 struct rbd_img_request *parent_request;
2264 struct rbd_obj_request *orig_request;
2265
2266 parent_request = container_of(kref, struct rbd_img_request, kref);
2267 orig_request = parent_request->obj_request;
2268
2269 parent_request->obj_request = NULL;
2270 rbd_obj_request_put(orig_request);
2271 img_request_child_clear(parent_request);
2272
2273 rbd_img_request_destroy(kref);
2274 }
2275
2276 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2277 {
2278 struct rbd_img_request *img_request;
2279 unsigned int xferred;
2280 int result;
2281 bool more;
2282
2283 rbd_assert(obj_request_img_data_test(obj_request));
2284 img_request = obj_request->img_request;
2285
2286 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2287 xferred = (unsigned int)obj_request->xferred;
2288 result = obj_request->result;
2289 if (result) {
2290 struct rbd_device *rbd_dev = img_request->rbd_dev;
2291 enum obj_operation_type op_type;
2292
2293 if (img_request_discard_test(img_request))
2294 op_type = OBJ_OP_DISCARD;
2295 else if (img_request_write_test(img_request))
2296 op_type = OBJ_OP_WRITE;
2297 else
2298 op_type = OBJ_OP_READ;
2299
2300 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
2301 obj_op_name(op_type), obj_request->length,
2302 obj_request->img_offset, obj_request->offset);
2303 rbd_warn(rbd_dev, " result %d xferred %x",
2304 result, xferred);
2305 if (!img_request->result)
2306 img_request->result = result;
2307 /*
2308 * Need to end I/O on the entire obj_request worth of
2309 * bytes in case of error.
2310 */
2311 xferred = obj_request->length;
2312 }
2313
2314 /* Image object requests don't own their page array */
2315
2316 if (obj_request->type == OBJ_REQUEST_PAGES) {
2317 obj_request->pages = NULL;
2318 obj_request->page_count = 0;
2319 }
2320
2321 if (img_request_child_test(img_request)) {
2322 rbd_assert(img_request->obj_request != NULL);
2323 more = obj_request->which < img_request->obj_request_count - 1;
2324 } else {
2325 rbd_assert(img_request->rq != NULL);
2326
2327 more = blk_update_request(img_request->rq, result, xferred);
2328 if (!more)
2329 __blk_mq_end_request(img_request->rq, result);
2330 }
2331
2332 return more;
2333 }
2334
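/*
 * Completion ordering (sketch): object requests are ended toward the
 * block layer strictly in the order they were added to the image
 * request; next_completion records how far that in-order sweep has
 * progressed, so a completion that arrives out of order simply returns
 * here and is folded in later, once its predecessors finish.
 */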
2335 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2336 {
2337 struct rbd_img_request *img_request;
2338 u32 which = obj_request->which;
2339 bool more = true;
2340
2341 rbd_assert(obj_request_img_data_test(obj_request));
2342 img_request = obj_request->img_request;
2343
2344 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2345 rbd_assert(img_request != NULL);
2346 rbd_assert(img_request->obj_request_count > 0);
2347 rbd_assert(which != BAD_WHICH);
2348 rbd_assert(which < img_request->obj_request_count);
2349
2350 spin_lock_irq(&img_request->completion_lock);
2351 if (which != img_request->next_completion)
2352 goto out;
2353
2354 for_each_obj_request_from(img_request, obj_request) {
2355 rbd_assert(more);
2356 rbd_assert(which < img_request->obj_request_count);
2357
2358 if (!obj_request_done_test(obj_request))
2359 break;
2360 more = rbd_img_obj_end_request(obj_request);
2361 which++;
2362 }
2363
2364 rbd_assert(more ^ (which == img_request->obj_request_count));
2365 img_request->next_completion = which;
2366 out:
2367 spin_unlock_irq(&img_request->completion_lock);
2368 rbd_img_request_put(img_request);
2369
2370 if (!more)
2371 rbd_img_request_complete(img_request);
2372 }
2373
2374 /*
2375 * Add individual osd ops to the given ceph_osd_request and prepare
2376 * them for submission. num_ops is the number of osd operations
2377 * already present in the given osd request.
2378 */
2379 static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2380 struct ceph_osd_request *osd_request,
2381 enum obj_operation_type op_type,
2382 unsigned int num_ops)
2383 {
2384 struct rbd_img_request *img_request = obj_request->img_request;
2385 struct rbd_device *rbd_dev = img_request->rbd_dev;
2386 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2387 u64 offset = obj_request->offset;
2388 u64 length = obj_request->length;
2389 u64 img_end;
2390 u16 opcode;
2391
2392 if (op_type == OBJ_OP_DISCARD) {
2393 if (!offset && length == object_size &&
2394 (!img_request_layered_test(img_request) ||
2395 !obj_request_overlaps_parent(obj_request))) {
2396 opcode = CEPH_OSD_OP_DELETE;
2397 } else if ((offset + length == object_size)) {
2398 opcode = CEPH_OSD_OP_TRUNCATE;
2399 } else {
2400 down_read(&rbd_dev->header_rwsem);
2401 img_end = rbd_dev->header.image_size;
2402 up_read(&rbd_dev->header_rwsem);
2403
2404 if (obj_request->img_offset + length == img_end)
2405 opcode = CEPH_OSD_OP_TRUNCATE;
2406 else
2407 opcode = CEPH_OSD_OP_ZERO;
2408 }
2409 } else if (op_type == OBJ_OP_WRITE) {
2410 if (!offset && length == object_size)
2411 opcode = CEPH_OSD_OP_WRITEFULL;
2412 else
2413 opcode = CEPH_OSD_OP_WRITE;
2414 osd_req_op_alloc_hint_init(osd_request, num_ops,
2415 object_size, object_size);
2416 num_ops++;
2417 } else {
2418 opcode = CEPH_OSD_OP_READ;
2419 }
2420
2421 if (opcode == CEPH_OSD_OP_DELETE)
2422 osd_req_op_init(osd_request, num_ops, opcode, 0);
2423 else
2424 osd_req_op_extent_init(osd_request, num_ops, opcode,
2425 offset, length, 0, 0);
2426
2427 if (obj_request->type == OBJ_REQUEST_BIO)
2428 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2429 obj_request->bio_list, length);
2430 else if (obj_request->type == OBJ_REQUEST_PAGES)
2431 osd_req_op_extent_osd_data_pages(osd_request, num_ops,
2432 obj_request->pages, length,
2433 offset & ~PAGE_MASK, false, false);
2434
2435 /* Discards are also writes */
2436 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2437 rbd_osd_req_format_write(obj_request);
2438 else
2439 rbd_osd_req_format_read(obj_request);
2440 }
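/*
 * Resulting osd op layouts (sketch of what the code above produces):
 *
 *	read:            [ READ ]
 *	plain write:     [ SETALLOCHINT, WRITE or WRITEFULL ]
 *	discard:         [ DELETE ] or [ TRUNCATE ] or [ ZERO ]
 *	copyup write:    [ CALL "copyup", SETALLOCHINT, WRITE or WRITEFULL ]
 *	copyup discard:  [ CALL "copyup", TRUNCATE or ZERO ]
 *
 * The copyup variants are built by rbd_img_obj_parent_read_full_callback(),
 * which initializes op 0 itself and then calls this function with
 * num_ops == 1.
 */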
2441
2442 /*
2443 * Split up an image request into one or more object requests, each
2444 * to a different object. The "type" parameter indicates whether
2445 * "data_desc" is the pointer to the head of a list of bio
2446 * structures, or the base of a page array. In either case this
2447 * function assumes data_desc describes memory sufficient to hold
2448 * all data described by the image request.
2449 */
2450 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2451 enum obj_request_type type,
2452 void *data_desc)
2453 {
2454 struct rbd_device *rbd_dev = img_request->rbd_dev;
2455 struct rbd_obj_request *obj_request = NULL;
2456 struct rbd_obj_request *next_obj_request;
2457 struct bio *bio_list = NULL;
2458 unsigned int bio_offset = 0;
2459 struct page **pages = NULL;
2460 enum obj_operation_type op_type;
2461 u64 img_offset;
2462 u64 resid;
2463
2464 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2465 (int)type, data_desc);
2466
2467 img_offset = img_request->offset;
2468 resid = img_request->length;
2469 rbd_assert(resid > 0);
2470 op_type = rbd_img_request_op_type(img_request);
2471
2472 if (type == OBJ_REQUEST_BIO) {
2473 bio_list = data_desc;
2474 rbd_assert(img_offset ==
2475 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2476 } else if (type == OBJ_REQUEST_PAGES) {
2477 pages = data_desc;
2478 }
2479
2480 while (resid) {
2481 struct ceph_osd_request *osd_req;
2482 const char *object_name;
2483 u64 offset;
2484 u64 length;
2485
2486 object_name = rbd_segment_name(rbd_dev, img_offset);
2487 if (!object_name)
2488 goto out_unwind;
2489 offset = rbd_segment_offset(rbd_dev, img_offset);
2490 length = rbd_segment_length(rbd_dev, img_offset, resid);
2491 obj_request = rbd_obj_request_create(object_name,
2492 offset, length, type);
2493 /* object request has its own copy of the object name */
2494 rbd_segment_name_free(object_name);
2495 if (!obj_request)
2496 goto out_unwind;
2497
2498 /*
2499 * set obj_request->img_request before creating the
2500 * osd_request so that it gets the right snapc
2501 */
2502 rbd_img_obj_request_add(img_request, obj_request);
2503
2504 if (type == OBJ_REQUEST_BIO) {
2505 unsigned int clone_size;
2506
2507 rbd_assert(length <= (u64)UINT_MAX);
2508 clone_size = (unsigned int)length;
2509 obj_request->bio_list =
2510 bio_chain_clone_range(&bio_list,
2511 &bio_offset,
2512 clone_size,
2513 GFP_NOIO);
2514 if (!obj_request->bio_list)
2515 goto out_unwind;
2516 } else if (type == OBJ_REQUEST_PAGES) {
2517 unsigned int page_count;
2518
2519 obj_request->pages = pages;
2520 page_count = (u32)calc_pages_for(offset, length);
2521 obj_request->page_count = page_count;
2522 if ((offset + length) & ~PAGE_MASK)
2523 page_count--; /* more on last page */
2524 pages += page_count;
2525 }
2526
2527 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2528 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2529 obj_request);
2530 if (!osd_req)
2531 goto out_unwind;
2532
2533 obj_request->osd_req = osd_req;
2534 obj_request->callback = rbd_img_obj_callback;
2535 obj_request->img_offset = img_offset;
2536
2537 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
2538
2539 rbd_img_request_get(img_request);
2540
2541 img_offset += length;
2542 resid -= length;
2543 }
2544
2545 return 0;
2546
2547 out_unwind:
2548 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2549 rbd_img_obj_request_del(img_request, obj_request);
2550
2551 return -ENOMEM;
2552 }
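/*
 * Note on reference counting: each object request built above takes a
 * reference on the image request (rbd_img_request_get()); the matching
 * put is done in rbd_img_obj_callback() as each object request
 * completes.
 */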
2553
2554 static void
2555 rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
2556 {
2557 struct rbd_img_request *img_request;
2558 struct rbd_device *rbd_dev;
2559 struct page **pages;
2560 u32 page_count;
2561
2562 dout("%s: obj %p\n", __func__, obj_request);
2563
2564 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2565 obj_request->type == OBJ_REQUEST_NODATA);
2566 rbd_assert(obj_request_img_data_test(obj_request));
2567 img_request = obj_request->img_request;
2568 rbd_assert(img_request);
2569
2570 rbd_dev = img_request->rbd_dev;
2571 rbd_assert(rbd_dev);
2572
2573 pages = obj_request->copyup_pages;
2574 rbd_assert(pages != NULL);
2575 obj_request->copyup_pages = NULL;
2576 page_count = obj_request->copyup_page_count;
2577 rbd_assert(page_count);
2578 obj_request->copyup_page_count = 0;
2579 ceph_release_page_vector(pages, page_count);
2580
2581 /*
2582 * We want the transfer count to reflect the size of the
2583 * original write request. There is no such thing as a
2584 * successful short write, so if the request was successful
2585 * we can just set it to the originally-requested length.
2586 */
2587 if (!obj_request->result)
2588 obj_request->xferred = obj_request->length;
2589
2590 obj_request_done_set(obj_request);
2591 }
2592
2593 static void
2594 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2595 {
2596 struct rbd_obj_request *orig_request;
2597 struct ceph_osd_request *osd_req;
2598 struct ceph_osd_client *osdc;
2599 struct rbd_device *rbd_dev;
2600 struct page **pages;
2601 enum obj_operation_type op_type;
2602 u32 page_count;
2603 int img_result;
2604 u64 parent_length;
2605
2606 rbd_assert(img_request_child_test(img_request));
2607
2608 /* First get what we need from the image request */
2609
2610 pages = img_request->copyup_pages;
2611 rbd_assert(pages != NULL);
2612 img_request->copyup_pages = NULL;
2613 page_count = img_request->copyup_page_count;
2614 rbd_assert(page_count);
2615 img_request->copyup_page_count = 0;
2616
2617 orig_request = img_request->obj_request;
2618 rbd_assert(orig_request != NULL);
2619 rbd_assert(obj_request_type_valid(orig_request->type));
2620 img_result = img_request->result;
2621 parent_length = img_request->length;
2622 rbd_assert(parent_length == img_request->xferred);
2623 rbd_img_request_put(img_request);
2624
2625 rbd_assert(orig_request->img_request);
2626 rbd_dev = orig_request->img_request->rbd_dev;
2627 rbd_assert(rbd_dev);
2628
2629 /*
2630 * If the overlap has become 0 (most likely because the
2631 * image has been flattened) we need to free the pages
2632 * and re-submit the original write request.
2633 */
2634 if (!rbd_dev->parent_overlap) {
2635 struct ceph_osd_client *osdc;
2636
2637 ceph_release_page_vector(pages, page_count);
2638 osdc = &rbd_dev->rbd_client->client->osdc;
2639 img_result = rbd_obj_request_submit(osdc, orig_request);
2640 if (!img_result)
2641 return;
2642 }
2643
2644 if (img_result)
2645 goto out_err;
2646
2647 /*
2648 * The original osd request is of no use to us any more.
2649 * We need a new one that can hold the two or three ops of a
2650 * copyup request. Allocate the new copyup osd request for the
2651 * original request, and release the old one.
2652 */
2653 img_result = -ENOMEM;
2654 osd_req = rbd_osd_req_create_copyup(orig_request);
2655 if (!osd_req)
2656 goto out_err;
2657 rbd_osd_req_destroy(orig_request->osd_req);
2658 orig_request->osd_req = osd_req;
2659 orig_request->copyup_pages = pages;
2660 orig_request->copyup_page_count = page_count;
2661
2662 /* Initialize the copyup op */
2663
2664 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2665 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2666 false, false);
2667
2668 /* Add the other op(s) */
2669
2670 op_type = rbd_img_request_op_type(orig_request->img_request);
2671 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
2672
2673 /* All set, send it off. */
2674
2675 osdc = &rbd_dev->rbd_client->client->osdc;
2676 img_result = rbd_obj_request_submit(osdc, orig_request);
2677 if (!img_result)
2678 return;
2679 out_err:
2680 /* Record the error code and complete the request */
2681
2682 orig_request->result = img_result;
2683 orig_request->xferred = 0;
2684 obj_request_done_set(orig_request);
2685 rbd_obj_request_complete(orig_request);
2686 }
2687
2688 /*
2689 * Read from the parent image the range of data that covers the
2690 * entire target of the given object request. This is used for
2691 * satisfying a layered image write request when the target of an
2692 * object request from the image request does not exist.
2693 *
2694 * A page array big enough to hold the returned data is allocated
2695 * and supplied to rbd_img_request_fill() as the "data descriptor."
2696 * When the read completes, this page array will be transferred to
2697 * the original object request for the copyup operation.
2698 *
2699 * If an error occurs, record it as the result of the original
2700 * object request and mark it done so it gets completed.
2701 */
2702 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2703 {
2704 struct rbd_img_request *img_request = NULL;
2705 struct rbd_img_request *parent_request = NULL;
2706 struct rbd_device *rbd_dev;
2707 u64 img_offset;
2708 u64 length;
2709 struct page **pages = NULL;
2710 u32 page_count;
2711 int result;
2712
2713 rbd_assert(obj_request_img_data_test(obj_request));
2714 rbd_assert(obj_request_type_valid(obj_request->type));
2715
2716 img_request = obj_request->img_request;
2717 rbd_assert(img_request != NULL);
2718 rbd_dev = img_request->rbd_dev;
2719 rbd_assert(rbd_dev->parent != NULL);
2720
2721 /*
2722 * Determine the byte range covered by the object in the
2723 * child image to which the original request was to be sent.
2724 */
2725 img_offset = obj_request->img_offset - obj_request->offset;
2726 length = (u64)1 << rbd_dev->header.obj_order;
2727
2728 /*
2729 * There is no defined parent data beyond the parent
2730 * overlap, so limit what we read at that boundary if
2731 * necessary.
2732 */
2733 if (img_offset + length > rbd_dev->parent_overlap) {
2734 rbd_assert(img_offset < rbd_dev->parent_overlap);
2735 length = rbd_dev->parent_overlap - img_offset;
2736 }
2737
2738 /*
2739 * Allocate a page array big enough to receive the data read
2740 * from the parent.
2741 */
2742 page_count = (u32)calc_pages_for(0, length);
2743 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2744 if (IS_ERR(pages)) {
2745 result = PTR_ERR(pages);
2746 pages = NULL;
2747 goto out_err;
2748 }
2749
2750 result = -ENOMEM;
2751 parent_request = rbd_parent_request_create(obj_request,
2752 img_offset, length);
2753 if (!parent_request)
2754 goto out_err;
2755
2756 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2757 if (result)
2758 goto out_err;
2759 parent_request->copyup_pages = pages;
2760 parent_request->copyup_page_count = page_count;
2761
2762 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2763 result = rbd_img_request_submit(parent_request);
2764 if (!result)
2765 return 0;
2766
2767 parent_request->copyup_pages = NULL;
2768 parent_request->copyup_page_count = 0;
2769 parent_request->obj_request = NULL;
2770 rbd_obj_request_put(obj_request);
2771 out_err:
2772 if (pages)
2773 ceph_release_page_vector(pages, page_count);
2774 if (parent_request)
2775 rbd_img_request_put(parent_request);
2776 obj_request->result = result;
2777 obj_request->xferred = 0;
2778 obj_request_done_set(obj_request);
2779
2780 return result;
2781 }
2782
2783 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2784 {
2785 struct rbd_obj_request *orig_request;
2786 struct rbd_device *rbd_dev;
2787 int result;
2788
2789 rbd_assert(!obj_request_img_data_test(obj_request));
2790
2791 /*
2792 * All we need from the object request is the original
2793 * request and the result of the STAT op. Grab those, then
2794 * we're done with the request.
2795 */
2796 orig_request = obj_request->obj_request;
2797 obj_request->obj_request = NULL;
2798 rbd_obj_request_put(orig_request);
2799 rbd_assert(orig_request);
2800 rbd_assert(orig_request->img_request);
2801
2802 result = obj_request->result;
2803 obj_request->result = 0;
2804
2805 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2806 obj_request, orig_request, result,
2807 obj_request->xferred, obj_request->length);
2808 rbd_obj_request_put(obj_request);
2809
2810 /*
2811 * If the overlap has become 0 (most likely because the
2812 * image has been flattened) we need to free the pages
2813 * and re-submit the original write request.
2814 */
2815 rbd_dev = orig_request->img_request->rbd_dev;
2816 if (!rbd_dev->parent_overlap) {
2817 struct ceph_osd_client *osdc;
2818
2819 osdc = &rbd_dev->rbd_client->client->osdc;
2820 result = rbd_obj_request_submit(osdc, orig_request);
2821 if (!result)
2822 return;
2823 }
2824
2825 /*
2826 * Our only purpose here is to determine whether the object
2827 * exists, and we don't want to treat the non-existence as
2828 * an error. If something else comes back, transfer the
2829 * error to the original request and complete it now.
2830 */
2831 if (!result) {
2832 obj_request_existence_set(orig_request, true);
2833 } else if (result == -ENOENT) {
2834 obj_request_existence_set(orig_request, false);
2835 } else if (result) {
2836 orig_request->result = result;
2837 goto out;
2838 }
2839
2840 /*
2841 * Resubmit the original request now that we have recorded
2842 * whether the target object exists.
2843 */
2844 orig_request->result = rbd_img_obj_request_submit(orig_request);
2845 out:
2846 if (orig_request->result)
2847 rbd_obj_request_complete(orig_request);
2848 }
2849
2850 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2851 {
2852 struct rbd_obj_request *stat_request;
2853 struct rbd_device *rbd_dev;
2854 struct ceph_osd_client *osdc;
2855 struct page **pages = NULL;
2856 u32 page_count;
2857 size_t size;
2858 int ret;
2859
2860 /*
2861 * The response data for a STAT call consists of:
2862 * le64 length;
2863 * struct {
2864 * le32 tv_sec;
2865 * le32 tv_nsec;
2866 * } mtime;
2867 */
2868 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2869 page_count = (u32)calc_pages_for(0, size);
2870 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2871 if (IS_ERR(pages))
2872 return PTR_ERR(pages);
2873
2874 ret = -ENOMEM;
2875 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2876 OBJ_REQUEST_PAGES);
2877 if (!stat_request)
2878 goto out;
2879
2880 rbd_obj_request_get(obj_request);
2881 stat_request->obj_request = obj_request;
2882 stat_request->pages = pages;
2883 stat_request->page_count = page_count;
2884
2885 rbd_assert(obj_request->img_request);
2886 rbd_dev = obj_request->img_request->rbd_dev;
2887 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
2888 stat_request);
2889 if (!stat_request->osd_req)
2890 goto out;
2891 stat_request->callback = rbd_img_obj_exists_callback;
2892
2893 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
2894 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2895 false, false);
2896 rbd_osd_req_format_read(stat_request);
2897
2898 osdc = &rbd_dev->rbd_client->client->osdc;
2899 ret = rbd_obj_request_submit(osdc, stat_request);
2900 out:
2901 if (ret)
2902 rbd_obj_request_put(obj_request);
2903
2904 return ret;
2905 }
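/*
 * Note: the stat request holds a reference to the original object
 * request (taken above with rbd_obj_request_get()); that reference is
 * dropped in rbd_img_obj_exists_callback() once the STAT result has
 * been extracted.
 */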
2906
2907 static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
2908 {
2909 struct rbd_img_request *img_request;
2910 struct rbd_device *rbd_dev;
2911
2912 rbd_assert(obj_request_img_data_test(obj_request));
2913
2914 img_request = obj_request->img_request;
2915 rbd_assert(img_request);
2916 rbd_dev = img_request->rbd_dev;
2917
2918 /* Reads */
2919 if (!img_request_write_test(img_request) &&
2920 !img_request_discard_test(img_request))
2921 return true;
2922
2923 /* Non-layered writes */
2924 if (!img_request_layered_test(img_request))
2925 return true;
2926
2927 /*
2928 * Layered writes outside of the parent overlap range don't
2929 * share any data with the parent.
2930 */
2931 if (!obj_request_overlaps_parent(obj_request))
2932 return true;
2933
2934 /*
2935 * Entire-object layered writes - we will overwrite whatever
2936 * parent data there is anyway.
2937 */
2938 if (!obj_request->offset &&
2939 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2940 return true;
2941
2942 /*
2943 * If the object is known to already exist, its parent data has
2944 * already been copied.
2945 */
2946 if (obj_request_known_test(obj_request) &&
2947 obj_request_exists_test(obj_request))
2948 return true;
2949
2950 return false;
2951 }
2952
2953 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2954 {
2955 if (img_obj_request_simple(obj_request)) {
2956 struct rbd_device *rbd_dev;
2957 struct ceph_osd_client *osdc;
2958
2959 rbd_dev = obj_request->img_request->rbd_dev;
2960 osdc = &rbd_dev->rbd_client->client->osdc;
2961
2962 return rbd_obj_request_submit(osdc, obj_request);
2963 }
2964
2965 /*
2966 * It's a layered write. The target object might exist but
2967 * we may not know that yet. If we know it doesn't exist,
2968 * start by reading the data for the full target object from
2969 * the parent so we can use it for a copyup to the target.
2970 */
2971 if (obj_request_known_test(obj_request))
2972 return rbd_img_obj_parent_read_full(obj_request);
2973
2974 /* We don't know whether the target exists. Go find out. */
2975
2976 return rbd_img_obj_exists_submit(obj_request);
2977 }
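/*
 * Layered write flow (sketch of the dispatch above):
 *
 *	rbd_img_obj_request_submit()
 *	  -> simple case: submit directly to the osd
 *	  -> existence unknown: STAT via rbd_img_obj_exists_submit()
 *	  -> target known not to exist: rbd_img_obj_parent_read_full(),
 *	     whose callback issues the copyup + write/discard request
 */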
2978
2979 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2980 {
2981 struct rbd_obj_request *obj_request;
2982 struct rbd_obj_request *next_obj_request;
2983 int ret = 0;
2984
2985 dout("%s: img %p\n", __func__, img_request);
2986
2987 rbd_img_request_get(img_request);
2988 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2989 ret = rbd_img_obj_request_submit(obj_request);
2990 if (ret)
2991 goto out_put_ireq;
2992 }
2993
2994 out_put_ireq:
2995 rbd_img_request_put(img_request);
2996 return ret;
2997 }
2998
2999 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
3000 {
3001 struct rbd_obj_request *obj_request;
3002 struct rbd_device *rbd_dev;
3003 u64 obj_end;
3004 u64 img_xferred;
3005 int img_result;
3006
3007 rbd_assert(img_request_child_test(img_request));
3008
3009 /* First get what we need from the image request and release it */
3010
3011 obj_request = img_request->obj_request;
3012 img_xferred = img_request->xferred;
3013 img_result = img_request->result;
3014 rbd_img_request_put(img_request);
3015
3016 /*
3017 * If the overlap has become 0 (most likely because the
3018 * image has been flattened) we need to re-submit the
3019 * original request.
3020 */
3021 rbd_assert(obj_request);
3022 rbd_assert(obj_request->img_request);
3023 rbd_dev = obj_request->img_request->rbd_dev;
3024 if (!rbd_dev->parent_overlap) {
3025 struct ceph_osd_client *osdc;
3026
3027 osdc = &rbd_dev->rbd_client->client->osdc;
3028 img_result = rbd_obj_request_submit(osdc, obj_request);
3029 if (!img_result)
3030 return;
3031 }
3032
3033 obj_request->result = img_result;
3034 if (obj_request->result)
3035 goto out;
3036
3037 /*
3038 * We need to zero anything beyond the parent overlap
3039 * boundary. Since rbd_img_obj_request_read_callback()
3040 * will zero anything beyond the end of a short read, an
3041 * easy way to do this is to pretend the data from the
3042 * parent came up short--ending at the overlap boundary.
3043 */
3044 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
3045 obj_end = obj_request->img_offset + obj_request->length;
3046 if (obj_end > rbd_dev->parent_overlap) {
3047 u64 xferred = 0;
3048
3049 if (obj_request->img_offset < rbd_dev->parent_overlap)
3050 xferred = rbd_dev->parent_overlap -
3051 obj_request->img_offset;
3052
3053 obj_request->xferred = min(img_xferred, xferred);
3054 } else {
3055 obj_request->xferred = img_xferred;
3056 }
3057 out:
3058 rbd_img_obj_request_read_callback(obj_request);
3059 rbd_obj_request_complete(obj_request);
3060 }
3061
3062 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
3063 {
3064 struct rbd_img_request *img_request;
3065 int result;
3066
3067 rbd_assert(obj_request_img_data_test(obj_request));
3068 rbd_assert(obj_request->img_request != NULL);
3069 rbd_assert(obj_request->result == (s32) -ENOENT);
3070 rbd_assert(obj_request_type_valid(obj_request->type));
3071
3072 /* rbd_read_finish(obj_request, obj_request->length); */
3073 img_request = rbd_parent_request_create(obj_request,
3074 obj_request->img_offset,
3075 obj_request->length);
3076 result = -ENOMEM;
3077 if (!img_request)
3078 goto out_err;
3079
3080 if (obj_request->type == OBJ_REQUEST_BIO)
3081 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3082 obj_request->bio_list);
3083 else
3084 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3085 obj_request->pages);
3086 if (result)
3087 goto out_err;
3088
3089 img_request->callback = rbd_img_parent_read_callback;
3090 result = rbd_img_request_submit(img_request);
3091 if (result)
3092 goto out_err;
3093
3094 return;
3095 out_err:
3096 if (img_request)
3097 rbd_img_request_put(img_request);
3098 obj_request->result = result;
3099 obj_request->xferred = 0;
3100 obj_request_done_set(obj_request);
3101 }
3102
3103 static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
3104 {
3105 struct rbd_obj_request *obj_request;
3106 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3107 int ret;
3108
3109 obj_request = rbd_obj_request_create(rbd_dev->header_oid.name, 0, 0,
3110 OBJ_REQUEST_NODATA);
3111 if (!obj_request)
3112 return -ENOMEM;
3113
3114 ret = -ENOMEM;
3115 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3116 obj_request);
3117 if (!obj_request->osd_req)
3118 goto out;
3119
3120 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
3121 notify_id, 0, 0);
3122 rbd_osd_req_format_read(obj_request);
3123
3124 ret = rbd_obj_request_submit(osdc, obj_request);
3125 if (ret)
3126 goto out;
3127 ret = rbd_obj_request_wait(obj_request);
3128 out:
3129 rbd_obj_request_put(obj_request);
3130
3131 return ret;
3132 }
3133
3134 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
3135 {
3136 struct rbd_device *rbd_dev = (struct rbd_device *)data;
3137 int ret;
3138
3139 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
3140 rbd_dev->header_oid.name, (unsigned long long)notify_id,
3141 (unsigned int)opcode);
3142
3143 /*
3144 * Until adequate refresh error handling is in place, there is
3145 * not much we can do here, except warn.
3146 *
3147 * See http://tracker.ceph.com/issues/5040
3148 */
3149 ret = rbd_dev_refresh(rbd_dev);
3150 if (ret)
3151 rbd_warn(rbd_dev, "refresh failed: %d", ret);
3152
3153 ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
3154 if (ret)
3155 rbd_warn(rbd_dev, "notify_ack ret %d", ret);
3156 }
3157
3158 /*
3159 * Send a watch or unwatch request and wait for the ack. Returns
3160 * the request with a ref held on success, or an ERR_PTR() on error.
3161 */
3162 static struct rbd_obj_request *rbd_obj_watch_request_helper(
3163 struct rbd_device *rbd_dev,
3164 bool watch)
3165 {
3166 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3167 struct ceph_options *opts = osdc->client->options;
3168 struct rbd_obj_request *obj_request;
3169 int ret;
3170
3171 obj_request = rbd_obj_request_create(rbd_dev->header_oid.name, 0, 0,
3172 OBJ_REQUEST_NODATA);
3173 if (!obj_request)
3174 return ERR_PTR(-ENOMEM);
3175
3176 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
3177 obj_request);
3178 if (!obj_request->osd_req) {
3179 ret = -ENOMEM;
3180 goto out;
3181 }
3182
3183 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
3184 rbd_dev->watch_event->cookie, 0, watch);
3185 rbd_osd_req_format_write(obj_request);
3186
3187 if (watch)
3188 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
3189
3190 ret = rbd_obj_request_submit(osdc, obj_request);
3191 if (ret)
3192 goto out;
3193
3194 ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout);
3195 if (ret)
3196 goto out;
3197
3198 ret = obj_request->result;
3199 if (ret) {
3200 if (watch)
3201 rbd_obj_request_end(obj_request);
3202 goto out;
3203 }
3204
3205 return obj_request;
3206
3207 out:
3208 rbd_obj_request_put(obj_request);
3209 return ERR_PTR(ret);
3210 }
3211
3212 /*
3213 * Initiate a watch request, synchronously.
3214 */
3215 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
3216 {
3217 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3218 struct rbd_obj_request *obj_request;
3219 int ret;
3220
3221 rbd_assert(!rbd_dev->watch_event);
3222 rbd_assert(!rbd_dev->watch_request);
3223
3224 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
3225 &rbd_dev->watch_event);
3226 if (ret < 0)
3227 return ret;
3228
3229 obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
3230 if (IS_ERR(obj_request)) {
3231 ceph_osdc_cancel_event(rbd_dev->watch_event);
3232 rbd_dev->watch_event = NULL;
3233 return PTR_ERR(obj_request);
3234 }
3235
3236 /*
3237 * A watch request is set to linger, so the underlying osd
3238 * request won't go away until we unregister it. We retain
3239 * a pointer to the object request during that time (in
3240 * rbd_dev->watch_request), so we'll keep a reference to it.
3241 * We'll drop that reference after we've unregistered it in
3242 * rbd_dev_header_unwatch_sync().
3243 */
3244 rbd_dev->watch_request = obj_request;
3245
3246 return 0;
3247 }
3248
3249 static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3250 {
3251 struct rbd_obj_request *obj_request;
3252
3253 rbd_assert(rbd_dev->watch_event);
3254 rbd_assert(rbd_dev->watch_request);
3255
3256 rbd_obj_request_end(rbd_dev->watch_request);
3257 rbd_obj_request_put(rbd_dev->watch_request);
3258 rbd_dev->watch_request = NULL;
3259
3260 obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
3261 if (!IS_ERR(obj_request))
3262 rbd_obj_request_put(obj_request);
3263 else
3264 rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
3265 PTR_ERR(obj_request));
3266
3267 ceph_osdc_cancel_event(rbd_dev->watch_event);
3268 rbd_dev->watch_event = NULL;
3269 }
3270
3271 /*
3272 * Tear down a watch request, synchronously.
3273 */
3274 static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3275 {
3276 __rbd_dev_header_unwatch_sync(rbd_dev);
3277
3278 dout("%s flushing notifies\n", __func__);
3279 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
3280 }
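/*
 * Pairing sketch: rbd_dev_header_watch_sync() registers the watch on
 * the header object and stashes the lingering request in
 * rbd_dev->watch_request; rbd_dev_header_unwatch_sync() tears it down
 * again and then flushes any notify callbacks still in flight.
 */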
3281
3282 /*
3283 * Synchronous osd object method call. Returns the number of bytes
3284 * returned in the inbound buffer, or a negative error code.
3285 */
3286 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3287 const char *object_name,
3288 const char *class_name,
3289 const char *method_name,
3290 const void *outbound,
3291 size_t outbound_size,
3292 void *inbound,
3293 size_t inbound_size)
3294 {
3295 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3296 struct rbd_obj_request *obj_request;
3297 struct page **pages;
3298 u32 page_count;
3299 int ret;
3300
3301 /*
3302 * Method calls are ultimately read operations. The result
3303 * is placed into the inbound buffer provided. The caller
3304 * may also supply outbound data--parameters for the object
3305 * method. Currently, if this is present, it will be a
3306 * snapshot id.
3307 */
3308 page_count = (u32)calc_pages_for(0, inbound_size);
3309 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3310 if (IS_ERR(pages))
3311 return PTR_ERR(pages);
3312
3313 ret = -ENOMEM;
3314 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
3315 OBJ_REQUEST_PAGES);
3316 if (!obj_request)
3317 goto out;
3318
3319 obj_request->pages = pages;
3320 obj_request->page_count = page_count;
3321
3322 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3323 obj_request);
3324 if (!obj_request->osd_req)
3325 goto out;
3326
3327 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
3328 class_name, method_name);
3329 if (outbound_size) {
3330 struct ceph_pagelist *pagelist;
3331
3332 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3333 if (!pagelist)
3334 goto out;
3335
3336 ceph_pagelist_init(pagelist);
3337 ceph_pagelist_append(pagelist, outbound, outbound_size);
3338 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3339 pagelist);
3340 }
3341 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3342 obj_request->pages, inbound_size,
3343 0, false, false);
3344 rbd_osd_req_format_read(obj_request);
3345
3346 ret = rbd_obj_request_submit(osdc, obj_request);
3347 if (ret)
3348 goto out;
3349 ret = rbd_obj_request_wait(obj_request);
3350 if (ret)
3351 goto out;
3352
3353 ret = obj_request->result;
3354 if (ret < 0)
3355 goto out;
3356
3357 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3358 ret = (int)obj_request->xferred;
3359 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3360 out:
3361 if (obj_request)
3362 rbd_obj_request_put(obj_request);
3363 else
3364 ceph_release_page_vector(pages, page_count);
3365
3366 return ret;
3367 }
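/*
 * Example use (sketch only; the buffer and object name here are
 * illustrative, not taken from a specific caller):
 *
 *	char response[RBD_IMAGE_ID_LEN_MAX];
 *	int ret;
 *
 *	ret = rbd_obj_method_sync(rbd_dev, object_name, "rbd", "get_id",
 *				  NULL, 0, response, sizeof(response));
 *
 * A non-negative return value is the number of bytes placed in the
 * inbound buffer; a negative value is an error code.
 */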
3368
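/*
 * Requests are bounced from rbd_queue_rq() to rbd_wq and handled here.
 * This function may block (header_rwsem, GFP_NOIO allocations), which
 * is presumably why the work is deferred to workqueue context instead
 * of running directly in the blk-mq queue_rq path.
 */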
3369 static void rbd_queue_workfn(struct work_struct *work)
3370 {
3371 struct request *rq = blk_mq_rq_from_pdu(work);
3372 struct rbd_device *rbd_dev = rq->q->queuedata;
3373 struct rbd_img_request *img_request;
3374 struct ceph_snap_context *snapc = NULL;
3375 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3376 u64 length = blk_rq_bytes(rq);
3377 enum obj_operation_type op_type;
3378 u64 mapping_size;
3379 int result;
3380
3381 if (rq->cmd_type != REQ_TYPE_FS) {
3382 dout("%s: non-fs request type %d\n", __func__,
3383 (int) rq->cmd_type);
3384 result = -EIO;
3385 goto err;
3386 }
3387
3388 if (rq->cmd_flags & REQ_DISCARD)
3389 op_type = OBJ_OP_DISCARD;
3390 else if (rq->cmd_flags & REQ_WRITE)
3391 op_type = OBJ_OP_WRITE;
3392 else
3393 op_type = OBJ_OP_READ;
3394
3395 /* Ignore/skip any zero-length requests */
3396
3397 if (!length) {
3398 dout("%s: zero-length request\n", __func__);
3399 result = 0;
3400 goto err_rq;
3401 }
3402
3403 /* Only reads are allowed to a read-only device */
3404
3405 if (op_type != OBJ_OP_READ) {
3406 if (rbd_dev->mapping.read_only) {
3407 result = -EROFS;
3408 goto err_rq;
3409 }
3410 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3411 }
3412
3413 /*
3414 * Quit early if the mapped snapshot no longer exists. It's
3415 * still possible the snapshot will have disappeared by the
3416 * time our request arrives at the osd, but there's no sense in
3417 * sending it if we already know.
3418 */
3419 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3420 dout("request for non-existent snapshot");
3421 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3422 result = -ENXIO;
3423 goto err_rq;
3424 }
3425
3426 if (offset && length > U64_MAX - offset + 1) {
3427 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3428 length);
3429 result = -EINVAL;
3430 goto err_rq; /* Shouldn't happen */
3431 }
3432
3433 blk_mq_start_request(rq);
3434
3435 down_read(&rbd_dev->header_rwsem);
3436 mapping_size = rbd_dev->mapping.size;
3437 if (op_type != OBJ_OP_READ) {
3438 snapc = rbd_dev->header.snapc;
3439 ceph_get_snap_context(snapc);
3440 }
3441 up_read(&rbd_dev->header_rwsem);
3442
3443 if (offset + length > mapping_size) {
3444 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
3445 length, mapping_size);
3446 result = -EIO;
3447 goto err_rq;
3448 }
3449
3450 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
3451 snapc);
3452 if (!img_request) {
3453 result = -ENOMEM;
3454 goto err_rq;
3455 }
3456 img_request->rq = rq;
3457 snapc = NULL; /* img_request consumes a ref */
3458
3459 if (op_type == OBJ_OP_DISCARD)
3460 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
3461 NULL);
3462 else
3463 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3464 rq->bio);
3465 if (result)
3466 goto err_img_request;
3467
3468 result = rbd_img_request_submit(img_request);
3469 if (result)
3470 goto err_img_request;
3471
3472 return;
3473
3474 err_img_request:
3475 rbd_img_request_put(img_request);
3476 err_rq:
3477 if (result)
3478 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
3479 obj_op_name(op_type), length, offset, result);
3480 ceph_put_snap_context(snapc);
3481 err:
3482 blk_mq_end_request(rq, result);
3483 }
3484
3485 static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
3486 const struct blk_mq_queue_data *bd)
3487 {
3488 struct request *rq = bd->rq;
3489 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3490
3491 queue_work(rbd_wq, work);
3492 return BLK_MQ_RQ_QUEUE_OK;
3493 }
3494
3495 static void rbd_free_disk(struct rbd_device *rbd_dev)
3496 {
3497 struct gendisk *disk = rbd_dev->disk;
3498
3499 if (!disk)
3500 return;
3501
3502 rbd_dev->disk = NULL;
3503 if (disk->flags & GENHD_FL_UP) {
3504 del_gendisk(disk);
3505 if (disk->queue)
3506 blk_cleanup_queue(disk->queue);
3507 blk_mq_free_tag_set(&rbd_dev->tag_set);
3508 }
3509 put_disk(disk);
3510 }
3511
3512 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3513 const char *object_name,
3514 u64 offset, u64 length, void *buf)
3515
3516 {
3517 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3518 struct rbd_obj_request *obj_request;
3519 struct page **pages = NULL;
3520 u32 page_count;
3521 size_t size;
3522 int ret;
3523
3524 page_count = (u32) calc_pages_for(offset, length);
3525 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3526 if (IS_ERR(pages))
3527 return PTR_ERR(pages);
3528
3529 ret = -ENOMEM;
3530 obj_request = rbd_obj_request_create(object_name, offset, length,
3531 OBJ_REQUEST_PAGES);
3532 if (!obj_request)
3533 goto out;
3534
3535 obj_request->pages = pages;
3536 obj_request->page_count = page_count;
3537
3538 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3539 obj_request);
3540 if (!obj_request->osd_req)
3541 goto out;
3542
3543 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3544 offset, length, 0, 0);
3545 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3546 obj_request->pages,
3547 obj_request->length,
3548 obj_request->offset & ~PAGE_MASK,
3549 false, false);
3550 rbd_osd_req_format_read(obj_request);
3551
3552 ret = rbd_obj_request_submit(osdc, obj_request);
3553 if (ret)
3554 goto out;
3555 ret = rbd_obj_request_wait(obj_request);
3556 if (ret)
3557 goto out;
3558
3559 ret = obj_request->result;
3560 if (ret < 0)
3561 goto out;
3562
3563 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3564 size = (size_t) obj_request->xferred;
3565 ceph_copy_from_page_vector(pages, buf, 0, size);
3566 rbd_assert(size <= (size_t)INT_MAX);
3567 ret = (int)size;
3568 out:
3569 if (obj_request)
3570 rbd_obj_request_put(obj_request);
3571 else
3572 ceph_release_page_vector(pages, page_count);
3573
3574 return ret;
3575 }
3576
3577 /*
3578 * Read the complete header for the given rbd device. On successful
3579 * return, the rbd_dev->header field will contain up-to-date
3580 * information about the image.
3581 */
3582 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3583 {
3584 struct rbd_image_header_ondisk *ondisk = NULL;
3585 u32 snap_count = 0;
3586 u64 names_size = 0;
3587 u32 want_count;
3588 int ret;
3589
3590 /*
3591 * The complete header will include an array of its 64-bit
3592 * snapshot ids, followed by the names of those snapshots as
3593 * a contiguous block of NUL-terminated strings. Note that
3594 * the number of snapshots could change by the time we read
3595 * it in, in which case we re-read it.
3596 */
3597 do {
3598 size_t size;
3599
3600 kfree(ondisk);
3601
3602 size = sizeof (*ondisk);
3603 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3604 size += names_size;
3605 ondisk = kmalloc(size, GFP_KERNEL);
3606 if (!ondisk)
3607 return -ENOMEM;
3608
3609 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_oid.name,
3610 0, size, ondisk);
3611 if (ret < 0)
3612 goto out;
3613 if ((size_t)ret < size) {
3614 ret = -ENXIO;
3615 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3616 size, ret);
3617 goto out;
3618 }
3619 if (!rbd_dev_ondisk_valid(ondisk)) {
3620 ret = -ENXIO;
3621 rbd_warn(rbd_dev, "invalid header");
3622 goto out;
3623 }
3624
3625 names_size = le64_to_cpu(ondisk->snap_names_len);
3626 want_count = snap_count;
3627 snap_count = le32_to_cpu(ondisk->snap_count);
3628 } while (snap_count != want_count);
3629
3630 ret = rbd_header_from_disk(rbd_dev, ondisk);
3631 out:
3632 kfree(ondisk);
3633
3634 return ret;
3635 }
3636
3637 /*
3638 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3639 * has disappeared from the (just updated) snapshot context.
3640 */
3641 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3642 {
3643 u64 snap_id;
3644
3645 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3646 return;
3647
3648 snap_id = rbd_dev->spec->snap_id;
3649 if (snap_id == CEPH_NOSNAP)
3650 return;
3651
3652 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3653 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3654 }
3655
3656 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3657 {
3658 sector_t size;
3659
3660 /*
3661 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
3662 * try to update its size. If REMOVING is set, updating size
3663 * is just useless work since the device can't be opened.
3664 */
3665 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
3666 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
3667 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3668 dout("setting size to %llu sectors", (unsigned long long)size);
3669 set_capacity(rbd_dev->disk, size);
3670 revalidate_disk(rbd_dev->disk);
3671 }
3672 }
3673
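/*
 * Re-read the header and update the mapping. This is what the watch
 * callback (rbd_watch_cb()) invokes when the header object changes.
 */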
3674 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3675 {
3676 u64 mapping_size;
3677 int ret;
3678
3679 down_write(&rbd_dev->header_rwsem);
3680 mapping_size = rbd_dev->mapping.size;
3681
3682 ret = rbd_dev_header_info(rbd_dev);
3683 if (ret)
3684 goto out;
3685
3686 /*
3687 * If there is a parent, see if it has disappeared due to the
3688 * mapped image getting flattened.
3689 */
3690 if (rbd_dev->parent) {
3691 ret = rbd_dev_v2_parent_info(rbd_dev);
3692 if (ret)
3693 goto out;
3694 }
3695
3696 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
3697 rbd_dev->mapping.size = rbd_dev->header.image_size;
3698 } else {
3699 /* validate mapped snapshot's EXISTS flag */
3700 rbd_exists_validate(rbd_dev);
3701 }
3702
3703 out:
3704 up_write(&rbd_dev->header_rwsem);
3705 if (!ret && mapping_size != rbd_dev->mapping.size)
3706 rbd_dev_update_size(rbd_dev);
3707
3708 return ret;
3709 }
3710
3711 static int rbd_init_request(void *data, struct request *rq,
3712 unsigned int hctx_idx, unsigned int request_idx,
3713 unsigned int numa_node)
3714 {
3715 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3716
3717 INIT_WORK(work, rbd_queue_workfn);
3718 return 0;
3719 }
3720
3721 static struct blk_mq_ops rbd_mq_ops = {
3722 .queue_rq = rbd_queue_rq,
3723 .map_queue = blk_mq_map_queue,
3724 .init_request = rbd_init_request,
3725 };
3726
3727 static int rbd_init_disk(struct rbd_device *rbd_dev)
3728 {
3729 struct gendisk *disk;
3730 struct request_queue *q;
3731 u64 segment_size;
3732 int err;
3733
3734 /* create gendisk info */
3735 disk = alloc_disk(single_major ?
3736 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3737 RBD_MINORS_PER_MAJOR);
3738 if (!disk)
3739 return -ENOMEM;
3740
3741 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3742 rbd_dev->dev_id);
3743 disk->major = rbd_dev->major;
3744 disk->first_minor = rbd_dev->minor;
3745 if (single_major)
3746 disk->flags |= GENHD_FL_EXT_DEVT;
3747 disk->fops = &rbd_bd_ops;
3748 disk->private_data = rbd_dev;
3749
3750 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
3751 rbd_dev->tag_set.ops = &rbd_mq_ops;
3752 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
3753 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
3754 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
3755 rbd_dev->tag_set.nr_hw_queues = 1;
3756 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
3757
3758 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
3759 if (err)
3760 goto out_disk;
3761
3762 q = blk_mq_init_queue(&rbd_dev->tag_set);
3763 if (IS_ERR(q)) {
3764 err = PTR_ERR(q);
3765 goto out_tag_set;
3766 }
3767
3768 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
3769 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
3770
3771 /* set io sizes to object size */
3772 segment_size = rbd_obj_bytes(&rbd_dev->header);
3773 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3774 q->limits.max_sectors = queue_max_hw_sectors(q);
3775 blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
3776 blk_queue_max_segment_size(q, segment_size);
3777 blk_queue_io_min(q, segment_size);
3778 blk_queue_io_opt(q, segment_size);
3779
3780 /* enable the discard support */
3781 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
3782 q->limits.discard_granularity = segment_size;
3783 q->limits.discard_alignment = segment_size;
3784 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
3785 q->limits.discard_zeroes_data = 1;
3786
3787 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
3788 q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
3789
3790 disk->queue = q;
3791
3792 q->queuedata = rbd_dev;
3793
3794 rbd_dev->disk = disk;
3795
3796 return 0;
3797 out_tag_set:
3798 blk_mq_free_tag_set(&rbd_dev->tag_set);
3799 out_disk:
3800 put_disk(disk);
3801 return err;
3802 }
3803
3804 /*
3805 sysfs
3806 */
3807
3808 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3809 {
3810 return container_of(dev, struct rbd_device, dev);
3811 }
3812
3813 static ssize_t rbd_size_show(struct device *dev,
3814 struct device_attribute *attr, char *buf)
3815 {
3816 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3817
3818 return sprintf(buf, "%llu\n",
3819 (unsigned long long)rbd_dev->mapping.size);
3820 }
3821
3822 /*
3823 * Note this shows the features for whatever's mapped, which is not
3824 * necessarily the base image.
3825 */
3826 static ssize_t rbd_features_show(struct device *dev,
3827 struct device_attribute *attr, char *buf)
3828 {
3829 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3830
3831 return sprintf(buf, "0x%016llx\n",
3832 (unsigned long long)rbd_dev->mapping.features);
3833 }
3834
3835 static ssize_t rbd_major_show(struct device *dev,
3836 struct device_attribute *attr, char *buf)
3837 {
3838 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3839
3840 if (rbd_dev->major)
3841 return sprintf(buf, "%d\n", rbd_dev->major);
3842
3843 return sprintf(buf, "(none)\n");
3844 }
3845
3846 static ssize_t rbd_minor_show(struct device *dev,
3847 struct device_attribute *attr, char *buf)
3848 {
3849 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3850
3851 return sprintf(buf, "%d\n", rbd_dev->minor);
3852 }
3853
3854 static ssize_t rbd_client_id_show(struct device *dev,
3855 struct device_attribute *attr, char *buf)
3856 {
3857 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3858
3859 return sprintf(buf, "client%lld\n",
3860 ceph_client_id(rbd_dev->rbd_client->client));
3861 }
3862
3863 static ssize_t rbd_pool_show(struct device *dev,
3864 struct device_attribute *attr, char *buf)
3865 {
3866 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3867
3868 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3869 }
3870
3871 static ssize_t rbd_pool_id_show(struct device *dev,
3872 struct device_attribute *attr, char *buf)
3873 {
3874 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3875
3876 return sprintf(buf, "%llu\n",
3877 (unsigned long long) rbd_dev->spec->pool_id);
3878 }
3879
3880 static ssize_t rbd_name_show(struct device *dev,
3881 struct device_attribute *attr, char *buf)
3882 {
3883 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3884
3885 if (rbd_dev->spec->image_name)
3886 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3887
3888 return sprintf(buf, "(unknown)\n");
3889 }
3890
3891 static ssize_t rbd_image_id_show(struct device *dev,
3892 struct device_attribute *attr, char *buf)
3893 {
3894 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3895
3896 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3897 }
3898
3899 /*
3900 * Shows the name of the currently-mapped snapshot (or
3901 * RBD_SNAP_HEAD_NAME for the base image).
3902 */
3903 static ssize_t rbd_snap_show(struct device *dev,
3904 struct device_attribute *attr,
3905 char *buf)
3906 {
3907 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3908
3909 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3910 }
3911
3912 /*
3913 * For a v2 image, shows the chain of parent images, separated by empty
3914 * lines. For v1 images or if there is no parent, shows "(no parent
3915 * image)".
3916 */
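/*
 * A hypothetical single-level chain, with made-up values, would be
 * reported roughly as:
 *
 *   pool_id 2
 *   pool_name rbd
 *   image_id 1018e46e4cfb43
 *   image_name parent-image
 *   snap_id 4
 *   snap_name snap1
 *   overlap 10737418240
 */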
3917 static ssize_t rbd_parent_show(struct device *dev,
3918 struct device_attribute *attr,
3919 char *buf)
3920 {
3921 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3922 ssize_t count = 0;
3923
3924 if (!rbd_dev->parent)
3925 return sprintf(buf, "(no parent image)\n");
3926
3927 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
3928 struct rbd_spec *spec = rbd_dev->parent_spec;
3929
3930 count += sprintf(&buf[count], "%s"
3931 "pool_id %llu\npool_name %s\n"
3932 "image_id %s\nimage_name %s\n"
3933 "snap_id %llu\nsnap_name %s\n"
3934 "overlap %llu\n",
3935 !count ? "" : "\n", /* first? */
3936 spec->pool_id, spec->pool_name,
3937 spec->image_id, spec->image_name ?: "(unknown)",
3938 spec->snap_id, spec->snap_name,
3939 rbd_dev->parent_overlap);
3940 }
3941
3942 return count;
3943 }
3944
3945 static ssize_t rbd_image_refresh(struct device *dev,
3946 struct device_attribute *attr,
3947 const char *buf,
3948 size_t size)
3949 {
3950 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3951 int ret;
3952
3953 ret = rbd_dev_refresh(rbd_dev);
3954 if (ret)
3955 return ret;
3956
3957 return size;
3958 }
3959
3960 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3961 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3962 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3963 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
3964 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3965 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3966 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3967 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3968 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3969 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3970 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3971 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3972
3973 static struct attribute *rbd_attrs[] = {
3974 &dev_attr_size.attr,
3975 &dev_attr_features.attr,
3976 &dev_attr_major.attr,
3977 &dev_attr_minor.attr,
3978 &dev_attr_client_id.attr,
3979 &dev_attr_pool.attr,
3980 &dev_attr_pool_id.attr,
3981 &dev_attr_name.attr,
3982 &dev_attr_image_id.attr,
3983 &dev_attr_current_snap.attr,
3984 &dev_attr_parent.attr,
3985 &dev_attr_refresh.attr,
3986 NULL
3987 };
3988
3989 static struct attribute_group rbd_attr_group = {
3990 .attrs = rbd_attrs,
3991 };
3992
3993 static const struct attribute_group *rbd_attr_groups[] = {
3994 &rbd_attr_group,
3995 NULL
3996 };
3997
3998 static void rbd_dev_release(struct device *dev);
3999
4000 static struct device_type rbd_device_type = {
4001 .name = "rbd",
4002 .groups = rbd_attr_groups,
4003 .release = rbd_dev_release,
4004 };
4005
4006 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
4007 {
4008 kref_get(&spec->kref);
4009
4010 return spec;
4011 }
4012
4013 static void rbd_spec_free(struct kref *kref);
4014 static void rbd_spec_put(struct rbd_spec *spec)
4015 {
4016 if (spec)
4017 kref_put(&spec->kref, rbd_spec_free);
4018 }
4019
4020 static struct rbd_spec *rbd_spec_alloc(void)
4021 {
4022 struct rbd_spec *spec;
4023
4024 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4025 if (!spec)
4026 return NULL;
4027
4028 spec->pool_id = CEPH_NOPOOL;
4029 spec->snap_id = CEPH_NOSNAP;
4030 kref_init(&spec->kref);
4031
4032 return spec;
4033 }
4034
4035 static void rbd_spec_free(struct kref *kref)
4036 {
4037 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4038
4039 kfree(spec->pool_name);
4040 kfree(spec->image_id);
4041 kfree(spec->image_name);
4042 kfree(spec->snap_name);
4043 kfree(spec);
4044 }
4045
4046 static void rbd_dev_release(struct device *dev)
4047 {
4048 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4049 bool need_put = !!rbd_dev->opts;
4050
4051 ceph_oid_destroy(&rbd_dev->header_oid);
4052
4053 rbd_put_client(rbd_dev->rbd_client);
4054 rbd_spec_put(rbd_dev->spec);
4055 kfree(rbd_dev->opts);
4056 kfree(rbd_dev);
4057
4058 /*
4059 * This is racy, but way better than putting module outside of
4060 * the release callback. The race window is pretty small, so
4061 * doing something similar to dm (dm-builtin.c) is overkill.
4062 */
4063 if (need_put)
4064 module_put(THIS_MODULE);
4065 }
4066
4067 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4068 struct rbd_spec *spec,
4069 struct rbd_options *opts)
4070 {
4071 struct rbd_device *rbd_dev;
4072
4073 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
4074 if (!rbd_dev)
4075 return NULL;
4076
4077 spin_lock_init(&rbd_dev->lock);
4078 rbd_dev->flags = 0;
4079 atomic_set(&rbd_dev->parent_ref, 0);
4080 INIT_LIST_HEAD(&rbd_dev->node);
4081 init_rwsem(&rbd_dev->header_rwsem);
4082
4083 ceph_oid_init(&rbd_dev->header_oid);
4084
4085 rbd_dev->dev.bus = &rbd_bus_type;
4086 rbd_dev->dev.type = &rbd_device_type;
4087 rbd_dev->dev.parent = &rbd_root_dev;
4088 device_initialize(&rbd_dev->dev);
4089
4090 rbd_dev->rbd_client = rbdc;
4091 rbd_dev->spec = spec;
4092 rbd_dev->opts = opts;
4093
4094 /* Initialize the layout used for all rbd requests */
4095
4096 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4097 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
4098 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4099 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
4100
4101 /*
4102 * If this is a mapping rbd_dev (as opposed to a parent one),
4103 * pin our module. We have a ref from do_rbd_add(), so use
4104 * __module_get().
4105 */
4106 if (rbd_dev->opts)
4107 __module_get(THIS_MODULE);
4108
4109 return rbd_dev;
4110 }
4111
4112 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4113 {
4114 if (rbd_dev)
4115 put_device(&rbd_dev->dev);
4116 }
4117
4118 /*
4119 * Get the size and object order for an image snapshot, or if
4120 * snap_id is CEPH_NOSNAP, gets this information for the base
4121 * image.
4122 */
4123 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4124 u8 *order, u64 *snap_size)
4125 {
4126 __le64 snapid = cpu_to_le64(snap_id);
4127 int ret;
4128 struct {
4129 u8 order;
4130 __le64 size;
4131 } __attribute__ ((packed)) size_buf = { 0 };
4132
4133 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
4134 "rbd", "get_size",
4135 &snapid, sizeof (snapid),
4136 &size_buf, sizeof (size_buf));
4137 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4138 if (ret < 0)
4139 return ret;
4140 if (ret < sizeof (size_buf))
4141 return -ERANGE;
4142
4143 if (order) {
4144 *order = size_buf.order;
4145 dout(" order %u", (unsigned int)*order);
4146 }
4147 *snap_size = le64_to_cpu(size_buf.size);
4148
4149 dout(" snap_id 0x%016llx snap_size = %llu\n",
4150 (unsigned long long)snap_id,
4151 (unsigned long long)*snap_size);
4152
4153 return 0;
4154 }
4155
4156 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4157 {
4158 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4159 &rbd_dev->header.obj_order,
4160 &rbd_dev->header.image_size);
4161 }
4162
4163 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4164 {
4165 void *reply_buf;
4166 int ret;
4167 void *p;
4168
4169 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4170 if (!reply_buf)
4171 return -ENOMEM;
4172
4173 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
4174 "rbd", "get_object_prefix", NULL, 0,
4175 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
4176 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4177 if (ret < 0)
4178 goto out;
4179
4180 p = reply_buf;
4181 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
4182 p + ret, NULL, GFP_NOIO);
4183 ret = 0;
4184
4185 if (IS_ERR(rbd_dev->header.object_prefix)) {
4186 ret = PTR_ERR(rbd_dev->header.object_prefix);
4187 rbd_dev->header.object_prefix = NULL;
4188 } else {
4189 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4190 }
4191 out:
4192 kfree(reply_buf);
4193
4194 return ret;
4195 }
4196
4197 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4198 u64 *snap_features)
4199 {
4200 __le64 snapid = cpu_to_le64(snap_id);
4201 struct {
4202 __le64 features;
4203 __le64 incompat;
4204 } __attribute__ ((packed)) features_buf = { 0 };
4205 u64 unsup;
4206 int ret;
4207
4208 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
4209 "rbd", "get_features",
4210 &snapid, sizeof (snapid),
4211 &features_buf, sizeof (features_buf));
4212 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4213 if (ret < 0)
4214 return ret;
4215 if (ret < sizeof (features_buf))
4216 return -ERANGE;
4217
4218 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
4219 if (unsup) {
4220 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
4221 unsup);
4222 return -ENXIO;
4223 }
4224
4225 *snap_features = le64_to_cpu(features_buf.features);
4226
4227 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
4228 (unsigned long long)snap_id,
4229 (unsigned long long)*snap_features,
4230 (unsigned long long)le64_to_cpu(features_buf.incompat));
4231
4232 return 0;
4233 }
4234
4235 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4236 {
4237 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4238 &rbd_dev->header.features);
4239 }
4240
4241 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4242 {
4243 struct rbd_spec *parent_spec;
4244 size_t size;
4245 void *reply_buf = NULL;
4246 __le64 snapid;
4247 void *p;
4248 void *end;
4249 u64 pool_id;
4250 char *image_id;
4251 u64 snap_id;
4252 u64 overlap;
4253 int ret;
4254
4255 parent_spec = rbd_spec_alloc();
4256 if (!parent_spec)
4257 return -ENOMEM;
4258
4259 size = sizeof (__le64) + /* pool_id */
4260 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4261 sizeof (__le64) + /* snap_id */
4262 sizeof (__le64); /* overlap */
4263 reply_buf = kmalloc(size, GFP_KERNEL);
4264 if (!reply_buf) {
4265 ret = -ENOMEM;
4266 goto out_err;
4267 }
4268
4269 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
4270 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
4271 "rbd", "get_parent",
4272 &snapid, sizeof (snapid),
4273 reply_buf, size);
4274 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4275 if (ret < 0)
4276 goto out_err;
4277
4278 p = reply_buf;
4279 end = reply_buf + ret;
4280 ret = -ERANGE;
4281 ceph_decode_64_safe(&p, end, pool_id, out_err);
4282 if (pool_id == CEPH_NOPOOL) {
4283 /*
4284 * Either the parent never existed, or we have a
4285 * record of it but the image got flattened so it no
4286 * longer has a parent. When the parent of a
4287 * layered image disappears we immediately set the
4288 * overlap to 0. The effect of this is that all new
4289 * requests will be treated as if the image had no
4290 * parent.
4291 */
4292 if (rbd_dev->parent_overlap) {
4293 rbd_dev->parent_overlap = 0;
4294 rbd_dev_parent_put(rbd_dev);
4295 pr_info("%s: clone image has been flattened\n",
4296 rbd_dev->disk->disk_name);
4297 }
4298
4299 goto out; /* No parent? No problem. */
4300 }
4301
4302 /* The ceph file layout needs to fit pool id in 32 bits */
4303
4304 ret = -EIO;
4305 if (pool_id > (u64)U32_MAX) {
4306 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
4307 (unsigned long long)pool_id, U32_MAX);
4308 goto out_err;
4309 }
4310
4311 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4312 if (IS_ERR(image_id)) {
4313 ret = PTR_ERR(image_id);
4314 goto out_err;
4315 }
4316 ceph_decode_64_safe(&p, end, snap_id, out_err);
4317 ceph_decode_64_safe(&p, end, overlap, out_err);
4318
4319 /*
4320 * The parent won't change (except when the clone is
4321 * flattened, which was handled above). So we only need to
4322 * record the parent spec if we have not already done so.
4323 */
4324 if (!rbd_dev->parent_spec) {
4325 parent_spec->pool_id = pool_id;
4326 parent_spec->image_id = image_id;
4327 parent_spec->snap_id = snap_id;
4328 rbd_dev->parent_spec = parent_spec;
4329 parent_spec = NULL; /* rbd_dev now owns this */
4330 } else {
4331 kfree(image_id);
4332 }
4333
4334 /*
4335 * We always update the parent overlap. If it's zero we issue
4336 * a warning, as we will proceed as if there was no parent.
4337 */
4338 if (!overlap) {
4339 if (parent_spec) {
4340 /* refresh, careful to warn just once */
4341 if (rbd_dev->parent_overlap)
4342 rbd_warn(rbd_dev,
4343 "clone now standalone (overlap became 0)");
4344 } else {
4345 /* initial probe */
4346 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
4347 }
4348 }
4349 rbd_dev->parent_overlap = overlap;
4350
4351 out:
4352 ret = 0;
4353 out_err:
4354 kfree(reply_buf);
4355 rbd_spec_put(parent_spec);
4356
4357 return ret;
4358 }
4359
4360 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4361 {
4362 struct {
4363 __le64 stripe_unit;
4364 __le64 stripe_count;
4365 } __attribute__ ((packed)) striping_info_buf = { 0 };
4366 size_t size = sizeof (striping_info_buf);
4367 void *p;
4368 u64 obj_size;
4369 u64 stripe_unit;
4370 u64 stripe_count;
4371 int ret;
4372
4373 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
4374 "rbd", "get_stripe_unit_count", NULL, 0,
4375 (char *)&striping_info_buf, size);
4376 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4377 if (ret < 0)
4378 return ret;
4379 if (ret < size)
4380 return -ERANGE;
4381
4382 /*
4383 * We don't actually support the "fancy striping" feature
4384 * (STRIPINGV2) yet, but if the striping sizes are the
4385 * defaults the behavior is the same as before. So find
4386 * out, and only fail if the image has non-default values.
4387 */
4388 ret = -EINVAL;
4389 obj_size = (u64)1 << rbd_dev->header.obj_order;
4390 p = &striping_info_buf;
4391 stripe_unit = ceph_decode_64(&p);
4392 if (stripe_unit != obj_size) {
4393 rbd_warn(rbd_dev, "unsupported stripe unit "
4394 "(got %llu want %llu)",
4395 stripe_unit, obj_size);
4396 return -EINVAL;
4397 }
4398 stripe_count = ceph_decode_64(&p);
4399 if (stripe_count != 1) {
4400 rbd_warn(rbd_dev, "unsupported stripe count "
4401 "(got %llu want 1)", stripe_count);
4402 return -EINVAL;
4403 }
4404 rbd_dev->header.stripe_unit = stripe_unit;
4405 rbd_dev->header.stripe_count = stripe_count;
4406
4407 return 0;
4408 }
4409
4410 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4411 {
4412 size_t image_id_size;
4413 char *image_id;
4414 void *p;
4415 void *end;
4416 size_t size;
4417 void *reply_buf = NULL;
4418 size_t len = 0;
4419 char *image_name = NULL;
4420 int ret;
4421
4422 rbd_assert(!rbd_dev->spec->image_name);
4423
4424 len = strlen(rbd_dev->spec->image_id);
4425 image_id_size = sizeof (__le32) + len;
4426 image_id = kmalloc(image_id_size, GFP_KERNEL);
4427 if (!image_id)
4428 return NULL;
4429
4430 p = image_id;
4431 end = image_id + image_id_size;
4432 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4433
4434 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4435 reply_buf = kmalloc(size, GFP_KERNEL);
4436 if (!reply_buf)
4437 goto out;
4438
4439 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4440 "rbd", "dir_get_name",
4441 image_id, image_id_size,
4442 reply_buf, size);
4443 if (ret < 0)
4444 goto out;
4445 p = reply_buf;
4446 end = reply_buf + ret;
4447
4448 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4449 if (IS_ERR(image_name))
4450 image_name = NULL;
4451 else
4452 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4453 out:
4454 kfree(reply_buf);
4455 kfree(image_id);
4456
4457 return image_name;
4458 }
4459
4460 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4461 {
4462 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4463 const char *snap_name;
4464 u32 which = 0;
4465
4466 /* Skip over names until we find the one we are looking for */
4467
4468 snap_name = rbd_dev->header.snap_names;
4469 while (which < snapc->num_snaps) {
4470 if (!strcmp(name, snap_name))
4471 return snapc->snaps[which];
4472 snap_name += strlen(snap_name) + 1;
4473 which++;
4474 }
4475 return CEPH_NOSNAP;
4476 }
4477
4478 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4479 {
4480 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4481 u32 which;
4482 bool found = false;
4483 u64 snap_id;
4484
4485 for (which = 0; !found && which < snapc->num_snaps; which++) {
4486 const char *snap_name;
4487
4488 snap_id = snapc->snaps[which];
4489 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4490 if (IS_ERR(snap_name)) {
4491 /* ignore no-longer existing snapshots */
4492 if (PTR_ERR(snap_name) == -ENOENT)
4493 continue;
4494 else
4495 break;
4496 }
4497 found = !strcmp(name, snap_name);
4498 kfree(snap_name);
4499 }
4500 return found ? snap_id : CEPH_NOSNAP;
4501 }
4502
4503 /*
4504 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4505 * no snapshot by that name is found, or if an error occurs.
4506 */
4507 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4508 {
4509 if (rbd_dev->image_format == 1)
4510 return rbd_v1_snap_id_by_name(rbd_dev, name);
4511
4512 return rbd_v2_snap_id_by_name(rbd_dev, name);
4513 }
4514
4515 /*
4516 * An image being mapped will have everything but the snap id.
4517 */
4518 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
4519 {
4520 struct rbd_spec *spec = rbd_dev->spec;
4521
4522 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
4523 rbd_assert(spec->image_id && spec->image_name);
4524 rbd_assert(spec->snap_name);
4525
4526 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4527 u64 snap_id;
4528
4529 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4530 if (snap_id == CEPH_NOSNAP)
4531 return -ENOENT;
4532
4533 spec->snap_id = snap_id;
4534 } else {
4535 spec->snap_id = CEPH_NOSNAP;
4536 }
4537
4538 return 0;
4539 }
4540
4541 /*
4542 * A parent image will have all ids but none of the names.
4543 *
4544 * All names in an rbd spec are dynamically allocated. It's OK if we
4545 * can't figure out the name for an image id.
4546 */
4547 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
4548 {
4549 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4550 struct rbd_spec *spec = rbd_dev->spec;
4551 const char *pool_name;
4552 const char *image_name;
4553 const char *snap_name;
4554 int ret;
4555
4556 rbd_assert(spec->pool_id != CEPH_NOPOOL);
4557 rbd_assert(spec->image_id);
4558 rbd_assert(spec->snap_id != CEPH_NOSNAP);
4559
4560 /* Get the pool name; we have to make our own copy of this */
4561
4562 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4563 if (!pool_name) {
4564 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4565 return -EIO;
4566 }
4567 pool_name = kstrdup(pool_name, GFP_KERNEL);
4568 if (!pool_name)
4569 return -ENOMEM;
4570
4571 /* Fetch the image name; tolerate failure here */
4572
4573 image_name = rbd_dev_image_name(rbd_dev);
4574 if (!image_name)
4575 rbd_warn(rbd_dev, "unable to get image name");
4576
4577 /* Fetch the snapshot name */
4578
4579 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4580 if (IS_ERR(snap_name)) {
4581 ret = PTR_ERR(snap_name);
4582 goto out_err;
4583 }
4584
4585 spec->pool_name = pool_name;
4586 spec->image_name = image_name;
4587 spec->snap_name = snap_name;
4588
4589 return 0;
4590
4591 out_err:
4592 kfree(image_name);
4593 kfree(pool_name);
4594 return ret;
4595 }
4596
4597 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4598 {
4599 size_t size;
4600 int ret;
4601 void *reply_buf;
4602 void *p;
4603 void *end;
4604 u64 seq;
4605 u32 snap_count;
4606 struct ceph_snap_context *snapc;
4607 u32 i;
4608
4609 /*
4610 * We'll need room for the seq value (maximum snapshot id),
4611 * snapshot count, and an array of that many snapshot ids.
4612 * For now we have a fixed upper limit on the number we're
4613 * prepared to receive.
4614 */
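/*
 * With RBD_MAX_SNAP_COUNT == 510 this works out to
 * 8 + 4 + 510 * 8 = 4092 bytes, i.e. the largest snapshot
 * context reply we accept still fits in a single 4 KiB page.
 */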
4615 size = sizeof (__le64) + sizeof (__le32) +
4616 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4617 reply_buf = kzalloc(size, GFP_KERNEL);
4618 if (!reply_buf)
4619 return -ENOMEM;
4620
4621 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
4622 "rbd", "get_snapcontext", NULL, 0,
4623 reply_buf, size);
4624 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4625 if (ret < 0)
4626 goto out;
4627
4628 p = reply_buf;
4629 end = reply_buf + ret;
4630 ret = -ERANGE;
4631 ceph_decode_64_safe(&p, end, seq, out);
4632 ceph_decode_32_safe(&p, end, snap_count, out);
4633
4634 /*
4635 * Make sure the reported number of snapshot ids wouldn't go
4636 * beyond the end of our buffer. But before checking that,
4637 * make sure the computed size of the snapshot context we
4638 * allocate is representable in a size_t.
4639 */
4640 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4641 / sizeof (u64)) {
4642 ret = -EINVAL;
4643 goto out;
4644 }
4645 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4646 goto out;
4647 ret = 0;
4648
4649 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4650 if (!snapc) {
4651 ret = -ENOMEM;
4652 goto out;
4653 }
4654 snapc->seq = seq;
4655 for (i = 0; i < snap_count; i++)
4656 snapc->snaps[i] = ceph_decode_64(&p);
4657
4658 ceph_put_snap_context(rbd_dev->header.snapc);
4659 rbd_dev->header.snapc = snapc;
4660
4661 dout(" snap context seq = %llu, snap_count = %u\n",
4662 (unsigned long long)seq, (unsigned int)snap_count);
4663 out:
4664 kfree(reply_buf);
4665
4666 return ret;
4667 }
4668
4669 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4670 u64 snap_id)
4671 {
4672 size_t size;
4673 void *reply_buf;
4674 __le64 snapid;
4675 int ret;
4676 void *p;
4677 void *end;
4678 char *snap_name;
4679
4680 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4681 reply_buf = kmalloc(size, GFP_KERNEL);
4682 if (!reply_buf)
4683 return ERR_PTR(-ENOMEM);
4684
4685 snapid = cpu_to_le64(snap_id);
4686 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
4687 "rbd", "get_snapshot_name",
4688 &snapid, sizeof (snapid),
4689 reply_buf, size);
4690 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4691 if (ret < 0) {
4692 snap_name = ERR_PTR(ret);
4693 goto out;
4694 }
4695
4696 p = reply_buf;
4697 end = reply_buf + ret;
4698 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4699 if (IS_ERR(snap_name))
4700 goto out;
4701
4702 dout(" snap_id 0x%016llx snap_name = %s\n",
4703 (unsigned long long)snap_id, snap_name);
4704 out:
4705 kfree(reply_buf);
4706
4707 return snap_name;
4708 }
4709
4710 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4711 {
4712 bool first_time = rbd_dev->header.object_prefix == NULL;
4713 int ret;
4714
4715 ret = rbd_dev_v2_image_size(rbd_dev);
4716 if (ret)
4717 return ret;
4718
4719 if (first_time) {
4720 ret = rbd_dev_v2_header_onetime(rbd_dev);
4721 if (ret)
4722 return ret;
4723 }
4724
4725 ret = rbd_dev_v2_snap_context(rbd_dev);
4726 if (ret && first_time) {
4727 kfree(rbd_dev->header.object_prefix);
4728 rbd_dev->header.object_prefix = NULL;
4729 }
4730
4731 return ret;
4732 }
4733
4734 static int rbd_dev_header_info(struct rbd_device *rbd_dev)
4735 {
4736 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4737
4738 if (rbd_dev->image_format == 1)
4739 return rbd_dev_v1_header_info(rbd_dev);
4740
4741 return rbd_dev_v2_header_info(rbd_dev);
4742 }
4743
4744 /*
4745 * Get a unique rbd identifier for the given new rbd_dev, and add
4746 * the rbd_dev to the global list.
4747 */
4748 static int rbd_dev_id_get(struct rbd_device *rbd_dev)
4749 {
4750 int new_dev_id;
4751
4752 new_dev_id = ida_simple_get(&rbd_dev_id_ida,
4753 0, minor_to_rbd_dev_id(1 << MINORBITS),
4754 GFP_KERNEL);
4755 if (new_dev_id < 0)
4756 return new_dev_id;
4757
4758 rbd_dev->dev_id = new_dev_id;
4759
4760 spin_lock(&rbd_dev_list_lock);
4761 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4762 spin_unlock(&rbd_dev_list_lock);
4763
4764 dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
4765
4766 return 0;
4767 }
4768
4769 /*
4770 * Remove an rbd_dev from the global list, and record that its
4771 * identifier is no longer in use.
4772 */
4773 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4774 {
4775 spin_lock(&rbd_dev_list_lock);
4776 list_del_init(&rbd_dev->node);
4777 spin_unlock(&rbd_dev_list_lock);
4778
4779 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4780
4781 dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
4782 }
4783
4784 /*
4785 * Skips over white space at *buf, and updates *buf to point to the
4786 * first found non-space character (if any). Returns the length of
4787 * the token (string of non-white space characters) found. Note
4788 * that *buf must be terminated with '\0'.
4789 */
4790 static inline size_t next_token(const char **buf)
4791 {
4792 /*
4793 * These are the characters that produce nonzero for
4794 * isspace() in the "C" and "POSIX" locales.
4795 */
4796 const char *spaces = " \f\n\r\t\v";
4797
4798 *buf += strspn(*buf, spaces); /* Find start of token */
4799
4800 return strcspn(*buf, spaces); /* Return token length */
4801 }
4802
4803 /*
4804 * Finds the next token in *buf, dynamically allocates a buffer big
4805 * enough to hold a copy of it, and copies the token into the new
4806 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4807 * that a duplicate buffer is created even for a zero-length token.
4808 *
4809 * Returns a pointer to the newly-allocated duplicate, or a null
4810 * pointer if memory for the duplicate was not available. If
4811 * the lenp argument is a non-null pointer, the length of the token
4812 * (not including the '\0') is returned in *lenp.
4813 *
4814 * If successful, the *buf pointer will be updated to point beyond
4815 * the end of the found token.
4816 *
4817 * Note: uses GFP_KERNEL for allocation.
4818 */
4819 static inline char *dup_token(const char **buf, size_t *lenp)
4820 {
4821 char *dup;
4822 size_t len;
4823
4824 len = next_token(buf);
4825 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4826 if (!dup)
4827 return NULL;
4828 *(dup + len) = '\0';
4829 *buf += len;
4830
4831 if (lenp)
4832 *lenp = len;
4833
4834 return dup;
4835 }
4836
4837 /*
4838 * Parse the options provided for an "rbd add" (i.e., rbd image
4839 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4840 * and the data written is passed here via a NUL-terminated buffer.
4841 * Returns 0 if successful or an error code otherwise.
4842 *
4843 * The information extracted from these options is recorded in
4844 * the other parameters which return dynamically-allocated
4845 * structures:
4846 * ceph_opts
4847 * The address of a pointer that will refer to a ceph options
4848 * structure. Caller must release the returned pointer using
4849 * ceph_destroy_options() when it is no longer needed.
4850 * rbd_opts
4851 * Address of an rbd options pointer. Fully initialized by
4852 * this function; caller must release with kfree().
4853 * spec
4854 * Address of an rbd image specification pointer. Fully
4855 * initialized by this function based on parsed options.
4856 * Caller must release with rbd_spec_put().
4857 *
4858 * The options passed take this form:
4859 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4860 * where:
4861 * <mon_addrs>
4862 * A comma-separated list of one or more monitor addresses.
4863 * A monitor address is an ip address, optionally followed
4864 * by a port number (separated by a colon).
4865 * I.e.: ip1[:port1][,ip2[:port2]...]
4866 * <options>
4867 * A comma-separated list of ceph and/or rbd options.
4868 * <pool_name>
4869 * The name of the rados pool containing the rbd image.
4870 * <image_name>
4871 * The name of the image in that pool to map.
4872 * <snap_name>
4873 * An optional snapshot name. If provided, the mapping will
4874 * present data from the image at the time that snapshot was
4875 * created. The image head is used if no snapshot name is
4876 * provided. Snapshot mappings are always read-only.
4877 */
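/*
 * For illustration only (the monitor address, credentials, pool and
 * image names below are hypothetical), a write to /sys/bus/rbd/add
 * might look like:
 *
 *   1.2.3.4:6789 name=admin,secret=<base64 key> rbd foo
 *
 * which maps the head of image "foo" in pool "rbd". Appending a
 * snapshot name would map that snapshot read-only instead.
 */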
4878 static int rbd_add_parse_args(const char *buf,
4879 struct ceph_options **ceph_opts,
4880 struct rbd_options **opts,
4881 struct rbd_spec **rbd_spec)
4882 {
4883 size_t len;
4884 char *options;
4885 const char *mon_addrs;
4886 char *snap_name;
4887 size_t mon_addrs_size;
4888 struct rbd_spec *spec = NULL;
4889 struct rbd_options *rbd_opts = NULL;
4890 struct ceph_options *copts;
4891 int ret;
4892
4893 /* The first four tokens are required */
4894
4895 len = next_token(&buf);
4896 if (!len) {
4897 rbd_warn(NULL, "no monitor address(es) provided");
4898 return -EINVAL;
4899 }
4900 mon_addrs = buf;
4901 mon_addrs_size = len + 1;
4902 buf += len;
4903
4904 ret = -EINVAL;
4905 options = dup_token(&buf, NULL);
4906 if (!options)
4907 return -ENOMEM;
4908 if (!*options) {
4909 rbd_warn(NULL, "no options provided");
4910 goto out_err;
4911 }
4912
4913 spec = rbd_spec_alloc();
4914 if (!spec)
4915 goto out_mem;
4916
4917 spec->pool_name = dup_token(&buf, NULL);
4918 if (!spec->pool_name)
4919 goto out_mem;
4920 if (!*spec->pool_name) {
4921 rbd_warn(NULL, "no pool name provided");
4922 goto out_err;
4923 }
4924
4925 spec->image_name = dup_token(&buf, NULL);
4926 if (!spec->image_name)
4927 goto out_mem;
4928 if (!*spec->image_name) {
4929 rbd_warn(NULL, "no image name provided");
4930 goto out_err;
4931 }
4932
4933 /*
4934 * Snapshot name is optional; default is to use "-"
4935 * (indicating the head/no snapshot).
4936 */
4937 len = next_token(&buf);
4938 if (!len) {
4939 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4940 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4941 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4942 ret = -ENAMETOOLONG;
4943 goto out_err;
4944 }
4945 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4946 if (!snap_name)
4947 goto out_mem;
4948 *(snap_name + len) = '\0';
4949 spec->snap_name = snap_name;
4950
4951 /* Initialize all rbd options to the defaults */
4952
4953 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4954 if (!rbd_opts)
4955 goto out_mem;
4956
4957 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4958 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
4959
4960 copts = ceph_parse_options(options, mon_addrs,
4961 mon_addrs + mon_addrs_size - 1,
4962 parse_rbd_opts_token, rbd_opts);
4963 if (IS_ERR(copts)) {
4964 ret = PTR_ERR(copts);
4965 goto out_err;
4966 }
4967 kfree(options);
4968
4969 *ceph_opts = copts;
4970 *opts = rbd_opts;
4971 *rbd_spec = spec;
4972
4973 return 0;
4974 out_mem:
4975 ret = -ENOMEM;
4976 out_err:
4977 kfree(rbd_opts);
4978 rbd_spec_put(spec);
4979 kfree(options);
4980
4981 return ret;
4982 }
4983
4984 /*
4985 * Return pool id (>= 0) or a negative error code.
4986 */
4987 static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
4988 {
4989 struct ceph_options *opts = rbdc->client->options;
4990 u64 newest_epoch;
4991 int tries = 0;
4992 int ret;
4993
4994 again:
4995 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
4996 if (ret == -ENOENT && tries++ < 1) {
4997 ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
4998 &newest_epoch);
4999 if (ret < 0)
5000 return ret;
5001
5002 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
5003 ceph_monc_request_next_osdmap(&rbdc->client->monc);
5004 (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
5005 newest_epoch,
5006 opts->mount_timeout);
5007 goto again;
5008 } else {
5009 /* the osdmap we have is new enough */
5010 return -ENOENT;
5011 }
5012 }
5013
5014 return ret;
5015 }
5016
5017 /*
5018 * An rbd format 2 image has a unique identifier, distinct from the
5019 * name given to it by the user. Internally, that identifier is
5020 * what's used to specify the names of objects related to the image.
5021 *
5022 * A special "rbd id" object is used to map an rbd image name to its
5023 * id. If that object doesn't exist, then there is no v2 rbd image
5024 * with the supplied name.
5025 *
5026 * This function will record the given rbd_dev's image_id field if
5027 * it can be determined, and in that case will return 0. If any
5028 * errors occur a negative errno will be returned and the rbd_dev's
5029 * image_id field will be unchanged (and should be NULL).
5030 */
5031 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
5032 {
5033 int ret;
5034 size_t size;
5035 char *object_name;
5036 void *response;
5037 char *image_id;
5038
5039 /*
5040 * When probing a parent image, the image id is already
5041 * known (and the image name likely is not). There's no
5042 * need to fetch the image id again in this case. We
5043 * do still need to set the image format though.
5044 */
5045 if (rbd_dev->spec->image_id) {
5046 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
5047
5048 return 0;
5049 }
5050
5051 /*
5052 * First, see if the format 2 image id file exists, and if
5053 * so, get the image's persistent id from it.
5054 */
5055 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
5056 object_name = kmalloc(size, GFP_NOIO);
5057 if (!object_name)
5058 return -ENOMEM;
5059 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
5060 dout("rbd id object name is %s\n", object_name);
5061
5062 /* Response will be an encoded string, which includes a length */
5063
5064 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5065 response = kzalloc(size, GFP_NOIO);
5066 if (!response) {
5067 ret = -ENOMEM;
5068 goto out;
5069 }
5070
5071 /* If it doesn't exist we'll assume it's a format 1 image */
5072
5073 ret = rbd_obj_method_sync(rbd_dev, object_name,
5074 "rbd", "get_id", NULL, 0,
5075 response, RBD_IMAGE_ID_LEN_MAX);
5076 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5077 if (ret == -ENOENT) {
5078 image_id = kstrdup("", GFP_KERNEL);
5079 ret = image_id ? 0 : -ENOMEM;
5080 if (!ret)
5081 rbd_dev->image_format = 1;
5082 } else if (ret >= 0) {
5083 void *p = response;
5084
5085 image_id = ceph_extract_encoded_string(&p, p + ret,
5086 NULL, GFP_NOIO);
5087 ret = PTR_ERR_OR_ZERO(image_id);
5088 if (!ret)
5089 rbd_dev->image_format = 2;
5090 }
5091
5092 if (!ret) {
5093 rbd_dev->spec->image_id = image_id;
5094 dout("image_id is %s\n", image_id);
5095 }
5096 out:
5097 kfree(response);
5098 kfree(object_name);
5099
5100 return ret;
5101 }
5102
5103 /*
5104 * Undo whatever state changes are made by v1 or v2 header info
5105 * call.
5106 */
5107 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5108 {
5109 struct rbd_image_header *header;
5110
5111 rbd_dev_parent_put(rbd_dev);
5112
5113 /* Free dynamic fields from the header, then zero it out */
5114
5115 header = &rbd_dev->header;
5116 ceph_put_snap_context(header->snapc);
5117 kfree(header->snap_sizes);
5118 kfree(header->snap_names);
5119 kfree(header->object_prefix);
5120 memset(header, 0, sizeof (*header));
5121 }
5122
5123 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
5124 {
5125 int ret;
5126
5127 ret = rbd_dev_v2_object_prefix(rbd_dev);
5128 if (ret)
5129 goto out_err;
5130
5131 /*
5132 * Get and check the features for the image. Currently the
5133 * features are assumed to never change.
5134 */
5135 ret = rbd_dev_v2_features(rbd_dev);
5136 if (ret)
5137 goto out_err;
5138
5139 /* If the image supports fancy striping, get its parameters */
5140
5141 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5142 ret = rbd_dev_v2_striping_info(rbd_dev);
5143 if (ret < 0)
5144 goto out_err;
5145 }
5146 /* No support for crypto and compression type format 2 images */
5147
5148 return 0;
5149 out_err:
5150 rbd_dev->header.features = 0;
5151 kfree(rbd_dev->header.object_prefix);
5152 rbd_dev->header.object_prefix = NULL;
5153
5154 return ret;
5155 }
5156
5157 /*
5158 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
5159 * rbd_dev_image_probe() recursion depth, which means it's also the
5160 * length of the already discovered part of the parent chain.
5161 */
5162 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
5163 {
5164 struct rbd_device *parent = NULL;
5165 int ret;
5166
5167 if (!rbd_dev->parent_spec)
5168 return 0;
5169
5170 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
5171 pr_info("parent chain is too long (%d)\n", depth);
5172 ret = -EINVAL;
5173 goto out_err;
5174 }
5175
5176 parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec,
5177 NULL);
5178 if (!parent) {
5179 ret = -ENOMEM;
5180 goto out_err;
5181 }
5182
5183 /*
5184 * Images related by parent/child relationships always share
5185 * rbd_client and spec/parent_spec, so bump their refcounts.
5186 */
5187 __rbd_get_client(rbd_dev->rbd_client);
5188 rbd_spec_get(rbd_dev->parent_spec);
5189
5190 ret = rbd_dev_image_probe(parent, depth);
5191 if (ret < 0)
5192 goto out_err;
5193
5194 rbd_dev->parent = parent;
5195 atomic_set(&rbd_dev->parent_ref, 1);
5196 return 0;
5197
5198 out_err:
5199 rbd_dev_unparent(rbd_dev);
5200 rbd_dev_destroy(parent);
5201 return ret;
5202 }
5203
5204 /*
5205 * rbd_dev->header_rwsem must be locked for write and will be unlocked
5206 * upon return.
5207 */
5208 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5209 {
5210 int ret;
5211
5212 /* Get an id and fill in device name. */
5213
5214 ret = rbd_dev_id_get(rbd_dev);
5215 if (ret)
5216 goto err_out_unlock;
5217
5218 BUILD_BUG_ON(DEV_NAME_LEN
5219 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
5220 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
5221
5222 /* Record our major and minor device numbers. */
5223
5224 if (!single_major) {
5225 ret = register_blkdev(0, rbd_dev->name);
5226 if (ret < 0)
5227 goto err_out_id;
5228
5229 rbd_dev->major = ret;
5230 rbd_dev->minor = 0;
5231 } else {
5232 rbd_dev->major = rbd_major;
5233 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
5234 }
5235
5236 /* Set up the blkdev mapping. */
5237
5238 ret = rbd_init_disk(rbd_dev);
5239 if (ret)
5240 goto err_out_blkdev;
5241
5242 ret = rbd_dev_mapping_set(rbd_dev);
5243 if (ret)
5244 goto err_out_disk;
5245
5246 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
5247 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
5248
5249 dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
5250 ret = device_add(&rbd_dev->dev);
5251 if (ret)
5252 goto err_out_mapping;
5253
5254 /* Everything's ready. Announce the disk to the world. */
5255
5256 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5257 up_write(&rbd_dev->header_rwsem);
5258
5259 add_disk(rbd_dev->disk);
5260 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
5261 (unsigned long long) rbd_dev->mapping.size);
5262
5263 return ret;
5264
5265 err_out_mapping:
5266 rbd_dev_mapping_clear(rbd_dev);
5267 err_out_disk:
5268 rbd_free_disk(rbd_dev);
5269 err_out_blkdev:
5270 if (!single_major)
5271 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5272 err_out_id:
5273 rbd_dev_id_put(rbd_dev);
5274 err_out_unlock:
5275 up_write(&rbd_dev->header_rwsem);
5276 return ret;
5277 }
5278
5279 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
5280 {
5281 struct rbd_spec *spec = rbd_dev->spec;
5282 int ret;
5283
5284 /* Record the header object name for this rbd image. */
5285
5286 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5287
5288 if (rbd_dev->image_format == 1)
5289 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
5290 spec->image_name, RBD_SUFFIX);
5291 else
5292 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
5293 RBD_HEADER_PREFIX, spec->image_id);
5294
5295 return ret;
5296 }
5297
5298 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
5299 {
5300 rbd_dev_unprobe(rbd_dev);
5301 rbd_dev->image_format = 0;
5302 kfree(rbd_dev->spec->image_id);
5303 rbd_dev->spec->image_id = NULL;
5304
5305 rbd_dev_destroy(rbd_dev);
5306 }
5307
5308 /*
5309 * Probe for the existence of the header object for the given rbd
5310 * device. If this image is the one being mapped (i.e., not a
5311 * parent), initiate a watch on its header object before using that
5312 * object to get detailed information about the rbd image.
5313 */
5314 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
5315 {
5316 int ret;
5317
5318 /*
5319 * Get the id from the image id object. Unless there's an
5320 * error, rbd_dev->spec->image_id will be filled in with
5321 * a dynamically-allocated string, and rbd_dev->image_format
5322 * will be set to either 1 or 2.
5323 */
5324 ret = rbd_dev_image_id(rbd_dev);
5325 if (ret)
5326 return ret;
5327
5328 ret = rbd_dev_header_name(rbd_dev);
5329 if (ret)
5330 goto err_out_format;
5331
5332 if (!depth) {
5333 ret = rbd_dev_header_watch_sync(rbd_dev);
5334 if (ret) {
5335 if (ret == -ENOENT)
5336 pr_info("image %s/%s does not exist\n",
5337 rbd_dev->spec->pool_name,
5338 rbd_dev->spec->image_name);
5339 goto err_out_format;
5340 }
5341 }
5342
5343 ret = rbd_dev_header_info(rbd_dev);
5344 if (ret)
5345 goto err_out_watch;
5346
5347 /*
5348 * If this image is the one being mapped, we have pool name and
5349 * id, image name and id, and snap name - need to fill snap id.
5350 * Otherwise this is a parent image, identified by pool, image
5351 * and snap ids - need to fill in names for those ids.
5352 */
5353 if (!depth)
5354 ret = rbd_spec_fill_snap_id(rbd_dev);
5355 else
5356 ret = rbd_spec_fill_names(rbd_dev);
5357 if (ret) {
5358 if (ret == -ENOENT)
5359 pr_info("snap %s/%s@%s does not exist\n",
5360 rbd_dev->spec->pool_name,
5361 rbd_dev->spec->image_name,
5362 rbd_dev->spec->snap_name);
5363 goto err_out_probe;
5364 }
5365
5366 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
5367 ret = rbd_dev_v2_parent_info(rbd_dev);
5368 if (ret)
5369 goto err_out_probe;
5370
5371 /*
5372 * Need to warn users if this image is the one being
5373 * mapped and has a parent.
5374 */
5375 if (!depth && rbd_dev->parent_spec)
5376 rbd_warn(rbd_dev,
5377 "WARNING: kernel layering is EXPERIMENTAL!");
5378 }
5379
5380 ret = rbd_dev_probe_parent(rbd_dev, depth);
5381 if (ret)
5382 goto err_out_probe;
5383
5384 dout("discovered format %u image, header name is %s\n",
5385 rbd_dev->image_format, rbd_dev->header_oid.name);
5386 return 0;
5387
5388 err_out_probe:
5389 rbd_dev_unprobe(rbd_dev);
5390 err_out_watch:
5391 if (!depth)
5392 rbd_dev_header_unwatch_sync(rbd_dev);
5393 err_out_format:
5394 rbd_dev->image_format = 0;
5395 kfree(rbd_dev->spec->image_id);
5396 rbd_dev->spec->image_id = NULL;
5397 return ret;
5398 }
5399
5400 static ssize_t do_rbd_add(struct bus_type *bus,
5401 const char *buf,
5402 size_t count)
5403 {
5404 struct rbd_device *rbd_dev = NULL;
5405 struct ceph_options *ceph_opts = NULL;
5406 struct rbd_options *rbd_opts = NULL;
5407 struct rbd_spec *spec = NULL;
5408 struct rbd_client *rbdc;
5409 bool read_only;
5410 int rc;
5411
5412 if (!try_module_get(THIS_MODULE))
5413 return -ENODEV;
5414
5415 /* parse add command */
5416 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
5417 if (rc < 0)
5418 goto out;
5419
5420 rbdc = rbd_get_client(ceph_opts);
5421 if (IS_ERR(rbdc)) {
5422 rc = PTR_ERR(rbdc);
5423 goto err_out_args;
5424 }
5425
5426 /* pick the pool */
5427 rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
5428 if (rc < 0) {
5429 if (rc == -ENOENT)
5430 pr_info("pool %s does not exist\n", spec->pool_name);
5431 goto err_out_client;
5432 }
5433 spec->pool_id = (u64)rc;
5434
5435 /* The ceph file layout needs to fit pool id in 32 bits */
5436
5437 if (spec->pool_id > (u64)U32_MAX) {
5438 rbd_warn(NULL, "pool id too large (%llu > %u)",
5439 (unsigned long long)spec->pool_id, U32_MAX);
5440 rc = -EIO;
5441 goto err_out_client;
5442 }
5443
5444 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
5445 if (!rbd_dev) {
5446 rc = -ENOMEM;
5447 goto err_out_client;
5448 }
5449 rbdc = NULL; /* rbd_dev now owns this */
5450 spec = NULL; /* rbd_dev now owns this */
5451 rbd_opts = NULL; /* rbd_dev now owns this */
5452
5453 down_write(&rbd_dev->header_rwsem);
5454 rc = rbd_dev_image_probe(rbd_dev, 0);
5455 if (rc < 0)
5456 goto err_out_rbd_dev;
5457
5458 /* If we are mapping a snapshot it must be marked read-only */
5459
5460 read_only = rbd_dev->opts->read_only;
5461 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5462 read_only = true;
5463 rbd_dev->mapping.read_only = read_only;
5464
5465 rc = rbd_dev_device_setup(rbd_dev);
5466 if (rc) {
5467 /*
5468 * rbd_dev_header_unwatch_sync() can't be moved into
5469 * rbd_dev_image_release() without refactoring, see
5470 * commit 1f3ef78861ac.
5471 */
5472 rbd_dev_header_unwatch_sync(rbd_dev);
5473 rbd_dev_image_release(rbd_dev);
5474 goto out;
5475 }
5476
5477 rc = count;
5478 out:
5479 module_put(THIS_MODULE);
5480 return rc;
5481
5482 err_out_rbd_dev:
5483 up_write(&rbd_dev->header_rwsem);
5484 rbd_dev_destroy(rbd_dev);
5485 err_out_client:
5486 rbd_put_client(rbdc);
5487 err_out_args:
5488 rbd_spec_put(spec);
5489 kfree(rbd_opts);
5490 goto out;
5491 }
5492
5493 static ssize_t rbd_add(struct bus_type *bus,
5494 const char *buf,
5495 size_t count)
5496 {
5497 if (single_major)
5498 return -EINVAL;
5499
5500 return do_rbd_add(bus, buf, count);
5501 }
5502
5503 static ssize_t rbd_add_single_major(struct bus_type *bus,
5504 const char *buf,
5505 size_t count)
5506 {
5507 return do_rbd_add(bus, buf, count);
5508 }
5509
5510 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
5511 {
5512 rbd_free_disk(rbd_dev);
5513 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5514 device_del(&rbd_dev->dev);
5515 rbd_dev_mapping_clear(rbd_dev);
5516 if (!single_major)
5517 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5518 rbd_dev_id_put(rbd_dev);
5519 }
5520
5521 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5522 {
5523 while (rbd_dev->parent) {
5524 struct rbd_device *first = rbd_dev;
5525 struct rbd_device *second = first->parent;
5526 struct rbd_device *third;
5527
5528 /*
5529 * Follow to the parent with no grandparent and
5530 * remove it.
5531 */
5532 while (second && (third = second->parent)) {
5533 first = second;
5534 second = third;
5535 }
5536 rbd_assert(second);
5537 rbd_dev_image_release(second);
5538 first->parent = NULL;
5539 first->parent_overlap = 0;
5540
5541 rbd_assert(first->parent_spec);
5542 rbd_spec_put(first->parent_spec);
5543 first->parent_spec = NULL;
5544 }
5545 }
5546
5547 static ssize_t do_rbd_remove(struct bus_type *bus,
5548 const char *buf,
5549 size_t count)
5550 {
5551 struct rbd_device *rbd_dev = NULL;
5552 struct list_head *tmp;
5553 int dev_id;
5554 unsigned long ul;
5555 bool already = false;
5556 int ret;
5557
5558 ret = kstrtoul(buf, 10, &ul);
5559 if (ret)
5560 return ret;
5561
5562 /* convert to int; abort if we lost anything in the conversion */
5563 dev_id = (int)ul;
5564 if (dev_id != ul)
5565 return -EINVAL;
5566
5567 ret = -ENOENT;
5568 spin_lock(&rbd_dev_list_lock);
5569 list_for_each(tmp, &rbd_dev_list) {
5570 rbd_dev = list_entry(tmp, struct rbd_device, node);
5571 if (rbd_dev->dev_id == dev_id) {
5572 ret = 0;
5573 break;
5574 }
5575 }
5576 if (!ret) {
5577 spin_lock_irq(&rbd_dev->lock);
5578 if (rbd_dev->open_count)
5579 ret = -EBUSY;
5580 else
5581 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
5582 &rbd_dev->flags);
5583 spin_unlock_irq(&rbd_dev->lock);
5584 }
5585 spin_unlock(&rbd_dev_list_lock);
5586 if (ret < 0 || already)
5587 return ret;
5588
5589 rbd_dev_header_unwatch_sync(rbd_dev);
5590
5591 /*
5592 * Don't free anything from rbd_dev->disk until after all
5593 * notifies are completely processed. Otherwise
5594 * rbd_dev_device_release() will race with rbd_watch_cb(), resulting
5595 * in a potential use after free of rbd_dev->disk or rbd_dev.
5596 */
5597 rbd_dev_device_release(rbd_dev);
5598 rbd_dev_image_release(rbd_dev);
5599
5600 return count;
5601 }
5602
5603 static ssize_t rbd_remove(struct bus_type *bus,
5604 const char *buf,
5605 size_t count)
5606 {
5607 if (single_major)
5608 return -EINVAL;
5609
5610 return do_rbd_remove(bus, buf, count);
5611 }
5612
5613 static ssize_t rbd_remove_single_major(struct bus_type *bus,
5614 const char *buf,
5615 size_t count)
5616 {
5617 return do_rbd_remove(bus, buf, count);
5618 }
5619
5620 /*
5621 * create control files in sysfs
5622 * /sys/bus/rbd/...
5623 */
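/*
 * These are assumed (from the rbd_add()/rbd_remove() handlers above)
 * to surface as /sys/bus/rbd/add and /sys/bus/rbd/remove, plus
 * /sys/bus/rbd/add_single_major and /sys/bus/rbd/remove_single_major
 * when the driver is loaded with the single_major parameter.
 */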
5624 static int rbd_sysfs_init(void)
5625 {
5626 int ret;
5627
5628 ret = device_register(&rbd_root_dev);
5629 if (ret < 0)
5630 return ret;
5631
5632 ret = bus_register(&rbd_bus_type);
5633 if (ret < 0)
5634 device_unregister(&rbd_root_dev);
5635
5636 return ret;
5637 }
5638
5639 static void rbd_sysfs_cleanup(void)
5640 {
5641 bus_unregister(&rbd_bus_type);
5642 device_unregister(&rbd_root_dev);
5643 }
5644
5645 static int rbd_slab_init(void)
5646 {
5647 rbd_assert(!rbd_img_request_cache);
5648 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
5649 if (!rbd_img_request_cache)
5650 return -ENOMEM;
5651
5652 rbd_assert(!rbd_obj_request_cache);
5653 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
5654 if (!rbd_obj_request_cache)
5655 goto out_err;
5656
5657 rbd_assert(!rbd_segment_name_cache);
5658 rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
5659 CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
5660 if (rbd_segment_name_cache)
5661 return 0;
5662 out_err:
5663 kmem_cache_destroy(rbd_obj_request_cache);
5664 rbd_obj_request_cache = NULL;
5665
5666 kmem_cache_destroy(rbd_img_request_cache);
5667 rbd_img_request_cache = NULL;
5668
5669 return -ENOMEM;
5670 }
5671
5672 static void rbd_slab_exit(void)
5673 {
5674 rbd_assert(rbd_segment_name_cache);
5675 kmem_cache_destroy(rbd_segment_name_cache);
5676 rbd_segment_name_cache = NULL;
5677
5678 rbd_assert(rbd_obj_request_cache);
5679 kmem_cache_destroy(rbd_obj_request_cache);
5680 rbd_obj_request_cache = NULL;
5681
5682 rbd_assert(rbd_img_request_cache);
5683 kmem_cache_destroy(rbd_img_request_cache);
5684 rbd_img_request_cache = NULL;
5685 }
5686
5687 static int __init rbd_init(void)
5688 {
5689 int rc;
5690
5691 if (!libceph_compatible(NULL)) {
5692 rbd_warn(NULL, "libceph incompatibility (quitting)");
5693 return -EINVAL;
5694 }
5695
5696 rc = rbd_slab_init();
5697 if (rc)
5698 return rc;
5699
5700 /*
5701 * The number of active work items is limited by the number of
5702 * rbd devices * queue depth, so leave @max_active at default.
5703 */
5704 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
5705 if (!rbd_wq) {
5706 rc = -ENOMEM;
5707 goto err_out_slab;
5708 }
5709
5710 if (single_major) {
5711 rbd_major = register_blkdev(0, RBD_DRV_NAME);
5712 if (rbd_major < 0) {
5713 rc = rbd_major;
5714 goto err_out_wq;
5715 }
5716 }
5717
5718 rc = rbd_sysfs_init();
5719 if (rc)
5720 goto err_out_blkdev;
5721
5722 if (single_major)
5723 pr_info("loaded (major %d)\n", rbd_major);
5724 else
5725 pr_info("loaded\n");
5726
5727 return 0;
5728
5729 err_out_blkdev:
5730 if (single_major)
5731 unregister_blkdev(rbd_major, RBD_DRV_NAME);
5732 err_out_wq:
5733 destroy_workqueue(rbd_wq);
5734 err_out_slab:
5735 rbd_slab_exit();
5736 return rc;
5737 }
5738
5739 static void __exit rbd_exit(void)
5740 {
5741 ida_destroy(&rbd_dev_id_ida);
5742 rbd_sysfs_cleanup();
5743 if (single_major)
5744 unregister_blkdev(rbd_major, RBD_DRV_NAME);
5745 destroy_workqueue(rbd_wq);
5746 rbd_slab_exit();
5747 }
5748
5749 module_init(rbd_init);
5750 module_exit(rbd_exit);
5751
5752 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
5753 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
5754 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
5755 /* following authorship retained from original osdblk.c */
5756 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
5757
5758 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
5759 MODULE_LICENSE("GPL");