rbd: kill create_snap sysfs entry
drivers/block/rbd.c
1 /*
2 rbd.c -- Export ceph rados objects as a Linux block device
3
4
5 based on drivers/block/osdblk.c:
6
7 Copyright 2009 Red Hat, Inc.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING. If not, write to
20 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21
22
23
24 For usage instructions, please refer to:
25
26 Documentation/ABI/testing/sysfs-bus-rbd
27
28 */
29
30 #include <linux/ceph/libceph.h>
31 #include <linux/ceph/osd_client.h>
32 #include <linux/ceph/mon_client.h>
33 #include <linux/ceph/decode.h>
34 #include <linux/parser.h>
35
36 #include <linux/kernel.h>
37 #include <linux/device.h>
38 #include <linux/module.h>
39 #include <linux/fs.h>
40 #include <linux/blkdev.h>
41
42 #include "rbd_types.h"
43
44 #define RBD_DEBUG /* Activate rbd_assert() calls */
45
46 /*
47 * The basic unit of block I/O is a sector. It is interpreted in a
48 * number of contexts in Linux (blk, bio, genhd), but the default is
49 * universally 512 bytes. These symbols are just slightly more
50 * meaningful than the bare numbers they represent.
51 */
52 #define SECTOR_SHIFT 9
53 #define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
54
55 /* It might be useful to have this defined elsewhere too */
56
57 #define U64_MAX ((u64) (~0ULL))
58
59 #define RBD_DRV_NAME "rbd"
60 #define RBD_DRV_NAME_LONG "rbd (rados block device)"
61
62 #define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
63
64 #define RBD_MAX_SNAP_NAME_LEN 32
65 #define RBD_MAX_OPT_LEN 1024
66
67 #define RBD_SNAP_HEAD_NAME "-"
68
69 #define RBD_IMAGE_ID_LEN_MAX 64
70
71 /*
72 * An RBD device name will be "rbd#", where the "rbd" comes from
73 * RBD_DRV_NAME above, and # is a unique integer identifier.
74 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
75 * enough to hold all possible device names.
76 */
77 #define DEV_NAME_LEN 32
78 #define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1)
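/* 5/2 decimal digits per byte over-approximates log10(256) = ~2.41; the +1 absorbs rounding */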
79
80 #define RBD_READ_ONLY_DEFAULT false
81
82 /*
83 * block device image metadata (in-memory version)
84 */
85 struct rbd_image_header {
86 /* These four fields never change for a given rbd image */
87 char *object_prefix;
88 __u8 obj_order;
89 __u8 crypt_type;
90 __u8 comp_type;
91
92 /* The remaining fields need to be updated occasionally */
93 u64 image_size;
94 struct ceph_snap_context *snapc;
95 char *snap_names;
96 u64 *snap_sizes;
97
98 u64 obj_version;
99 };
100
101 struct rbd_options {
102 bool read_only;
103 };
104
105 /*
106 * an instance of the client. multiple devices may share an rbd client.
107 */
108 struct rbd_client {
109 struct ceph_client *client;
110 struct kref kref;
111 struct list_head node;
112 };
113
114 /*
115 * a request completion status
116 */
117 struct rbd_req_status {
118 int done;
119 int rc;
120 u64 bytes;
121 };
122
123 /*
124 * a collection of requests
125 */
126 struct rbd_req_coll {
127 int total;
128 int num_done;
129 struct kref kref;
130 struct rbd_req_status status[0];
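	/* C89-style flexible array: one status per sub-request, sized in rbd_alloc_coll() */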
131 };
132
133 /*
134 * a single io request
135 */
136 struct rbd_request {
137 struct request *rq; /* blk layer request */
138 struct bio *bio; /* cloned bio */
139 struct page **pages; /* list of used pages */
140 u64 len;
141 int coll_index;
142 struct rbd_req_coll *coll;
143 };
144
145 struct rbd_snap {
146 struct device dev;
147 const char *name;
148 u64 size;
149 struct list_head node;
150 u64 id;
151 };
152
153 struct rbd_mapping {
154 char *snap_name;
155 u64 snap_id;
156 u64 size;
157 bool snap_exists;
158 bool read_only;
159 };
160
161 /*
162 * a single device
163 */
164 struct rbd_device {
165 int dev_id; /* blkdev unique id */
166
167 int major; /* blkdev assigned major */
168 struct gendisk *disk; /* blkdev's gendisk and rq */
169
170 struct rbd_options rbd_opts;
171 struct rbd_client *rbd_client;
172
173 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
174
175 spinlock_t lock; /* queue lock */
176
177 struct rbd_image_header header;
178 char *image_id;
179 size_t image_id_len;
180 char *image_name;
181 size_t image_name_len;
182 char *header_name;
183 char *pool_name;
184 int pool_id;
185
186 struct ceph_osd_event *watch_event;
187 struct ceph_osd_request *watch_request;
188
189 /* protects updating the header */
190 struct rw_semaphore header_rwsem;
191
192 struct rbd_mapping mapping;
193
194 struct list_head node;
195
196 /* list of snapshots */
197 struct list_head snaps;
198
199 /* sysfs related */
200 struct device dev;
201 };
202
203 static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
204
205 static LIST_HEAD(rbd_dev_list); /* devices */
206 static DEFINE_SPINLOCK(rbd_dev_list_lock);
207
208 static LIST_HEAD(rbd_client_list); /* clients */
209 static DEFINE_SPINLOCK(rbd_client_list_lock);
210
211 static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
212 static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);
213
214 static void rbd_dev_release(struct device *dev);
215 static void __rbd_remove_snap_dev(struct rbd_snap *snap);
216
217 static ssize_t rbd_add(struct bus_type *bus, const char *buf,
218 size_t count);
219 static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
220 size_t count);
221
222 static struct bus_attribute rbd_bus_attrs[] = {
223 __ATTR(add, S_IWUSR, NULL, rbd_add),
224 __ATTR(remove, S_IWUSR, NULL, rbd_remove),
225 __ATTR_NULL
226 };
227
228 static struct bus_type rbd_bus_type = {
229 .name = "rbd",
230 .bus_attrs = rbd_bus_attrs,
231 };
232
233 static void rbd_root_dev_release(struct device *dev)
234 {
235 }
236
237 static struct device rbd_root_dev = {
238 .init_name = "rbd",
239 .release = rbd_root_dev_release,
240 };
241
242 #ifdef RBD_DEBUG
243 #define rbd_assert(expr) \
244 if (unlikely(!(expr))) { \
245 printk(KERN_ERR "\nAssertion failure in %s() " \
246 "at line %d:\n\n" \
247 "\trbd_assert(%s);\n\n", \
248 __func__, __LINE__, #expr); \
249 BUG(); \
250 }
251 #else /* !RBD_DEBUG */
252 # define rbd_assert(expr) ((void) 0)
253 #endif /* !RBD_DEBUG */
254
255 static struct device *rbd_get_dev(struct rbd_device *rbd_dev)
256 {
257 return get_device(&rbd_dev->dev);
258 }
259
260 static void rbd_put_dev(struct rbd_device *rbd_dev)
261 {
262 put_device(&rbd_dev->dev);
263 }
264
265 static int rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver);
266
267 static int rbd_open(struct block_device *bdev, fmode_t mode)
268 {
269 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
270
271 if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
272 return -EROFS;
273
274 rbd_get_dev(rbd_dev);
275 set_device_ro(bdev, rbd_dev->mapping.read_only);
276
277 return 0;
278 }
279
280 static int rbd_release(struct gendisk *disk, fmode_t mode)
281 {
282 struct rbd_device *rbd_dev = disk->private_data;
283
284 rbd_put_dev(rbd_dev);
285
286 return 0;
287 }
288
289 static const struct block_device_operations rbd_bd_ops = {
290 .owner = THIS_MODULE,
291 .open = rbd_open,
292 .release = rbd_release,
293 };
294
295 /*
296 * Initialize an rbd client instance.
297 * We own *ceph_opts.
298 */
299 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
300 {
301 struct rbd_client *rbdc;
302 int ret = -ENOMEM;
303
304 dout("rbd_client_create\n");
305 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
306 if (!rbdc)
307 goto out_opt;
308
309 kref_init(&rbdc->kref);
310 INIT_LIST_HEAD(&rbdc->node);
311
312 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
313
314 rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
315 if (IS_ERR(rbdc->client))
316 goto out_mutex;
317 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
318
319 ret = ceph_open_session(rbdc->client);
320 if (ret < 0)
321 goto out_err;
322
323 spin_lock(&rbd_client_list_lock);
324 list_add_tail(&rbdc->node, &rbd_client_list);
325 spin_unlock(&rbd_client_list_lock);
326
327 mutex_unlock(&ctl_mutex);
328
329 dout("rbd_client_create created %p\n", rbdc);
330 return rbdc;
331
332 out_err:
333 ceph_destroy_client(rbdc->client);
334 out_mutex:
335 mutex_unlock(&ctl_mutex);
336 kfree(rbdc);
337 out_opt:
338 if (ceph_opts)
339 ceph_destroy_options(ceph_opts);
340 return ERR_PTR(ret);
341 }
342
343 /*
344 * Find a ceph client with specific addr and configuration. If
345 * found, bump its reference count.
346 */
347 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
348 {
349 struct rbd_client *client_node;
350 bool found = false;
351
352 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
353 return NULL;
354
355 spin_lock(&rbd_client_list_lock);
356 list_for_each_entry(client_node, &rbd_client_list, node) {
357 if (!ceph_compare_options(ceph_opts, client_node->client)) {
358 kref_get(&client_node->kref);
359 found = true;
360 break;
361 }
362 }
363 spin_unlock(&rbd_client_list_lock);
364
365 return found ? client_node : NULL;
366 }
367
368 /*
369 * mount options
370 */
371 enum {
372 Opt_last_int,
373 /* int args above */
374 Opt_last_string,
375 /* string args above */
376 Opt_read_only,
377 Opt_read_write,
378 /* Boolean args above */
379 Opt_last_bool,
380 };
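/*
 * Tokens are classified by their position relative to the Opt_last_*
 * sentinels above; at present there are no int or string options,
 * only the Boolean read_only/read_write pair.
 */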
381
382 static match_table_t rbd_opts_tokens = {
383 /* int args above */
384 /* string args above */
385 {Opt_read_only, "read_only"},
386 {Opt_read_only, "ro"}, /* Alternate spelling */
387 {Opt_read_write, "read_write"},
388 {Opt_read_write, "rw"}, /* Alternate spelling */
389 /* Boolean args above */
390 {-1, NULL}
391 };
392
393 static int parse_rbd_opts_token(char *c, void *private)
394 {
395 struct rbd_options *rbd_opts = private;
396 substring_t argstr[MAX_OPT_ARGS];
397 int token, intval, ret;
398
399 token = match_token(c, rbd_opts_tokens, argstr);
400 if (token < 0)
401 return -EINVAL;
402
403 if (token < Opt_last_int) {
404 ret = match_int(&argstr[0], &intval);
405 if (ret < 0) {
406 pr_err("bad mount option arg (not int) "
407 "at '%s'\n", c);
408 return ret;
409 }
410 dout("got int token %d val %d\n", token, intval);
411 } else if (token > Opt_last_int && token < Opt_last_string) {
412 dout("got string token %d val %s\n", token,
413 argstr[0].from);
414 } else if (token > Opt_last_string && token < Opt_last_bool) {
415 dout("got Boolean token %d\n", token);
416 } else {
417 dout("got token %d\n", token);
418 }
419
420 switch (token) {
421 case Opt_read_only:
422 rbd_opts->read_only = true;
423 break;
424 case Opt_read_write:
425 rbd_opts->read_only = false;
426 break;
427 default:
428 rbd_assert(false);
429 break;
430 }
431 return 0;
432 }
433
434 /*
435 * Get a ceph client with specific addr and configuration; create a
436 * new one if it does not already exist.
437 */
438 static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
439 size_t mon_addr_len, char *options)
440 {
441 struct rbd_options *rbd_opts = &rbd_dev->rbd_opts;
442 struct ceph_options *ceph_opts;
443 struct rbd_client *rbdc;
444
445 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
446
447 ceph_opts = ceph_parse_options(options, mon_addr,
448 mon_addr + mon_addr_len,
449 parse_rbd_opts_token, rbd_opts);
450 if (IS_ERR(ceph_opts))
451 return PTR_ERR(ceph_opts);
452
453 rbdc = rbd_client_find(ceph_opts);
454 if (rbdc) {
455 /* using an existing client */
456 ceph_destroy_options(ceph_opts);
457 } else {
458 rbdc = rbd_client_create(ceph_opts);
459 if (IS_ERR(rbdc))
460 return PTR_ERR(rbdc);
461 }
462 rbd_dev->rbd_client = rbdc;
463
464 return 0;
465 }
466
467 /*
468 * Destroy ceph client
469 *
470 * Caller must hold rbd_client_list_lock.
471 */
472 static void rbd_client_release(struct kref *kref)
473 {
474 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
475
476 dout("rbd_release_client %p\n", rbdc);
477 spin_lock(&rbd_client_list_lock);
478 list_del(&rbdc->node);
479 spin_unlock(&rbd_client_list_lock);
480
481 ceph_destroy_client(rbdc->client);
482 kfree(rbdc);
483 }
484
485 /*
486 * Drop reference to ceph client node. If it's not referenced anymore, release
487 * it.
488 */
489 static void rbd_put_client(struct rbd_device *rbd_dev)
490 {
491 kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
492 rbd_dev->rbd_client = NULL;
493 }
494
495 /*
496 * Destroy requests collection
497 */
498 static void rbd_coll_release(struct kref *kref)
499 {
500 struct rbd_req_coll *coll =
501 container_of(kref, struct rbd_req_coll, kref);
502
503 dout("rbd_coll_release %p\n", coll);
504 kfree(coll);
505 }
506
507 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
508 {
509 size_t size;
510 u32 snap_count;
511
512 /* The header has to start with the magic rbd header text */
513 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
514 return false;
515
516 /*
517 * The size of a snapshot header has to fit in a size_t, and
518 * that limits the number of snapshots.
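* (The bound is roughly SIZE_MAX / sizeof (__le64) ids, so in
* practice only a corrupt snap_count can exceed it.)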
519 */
520 snap_count = le32_to_cpu(ondisk->snap_count);
521 size = SIZE_MAX - sizeof (struct ceph_snap_context);
522 if (snap_count > size / sizeof (__le64))
523 return false;
524
525 /*
526 * Not only that, but the size of the entire snapshot
527 * header must also be representable in a size_t.
528 */
529 size -= snap_count * sizeof (__le64);
530 if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
531 return false;
532
533 return true;
534 }
535
536 /*
537 * Create a new header structure, translate header format from the on-disk
538 * header.
539 */
540 static int rbd_header_from_disk(struct rbd_image_header *header,
541 struct rbd_image_header_ondisk *ondisk)
542 {
543 u32 snap_count;
544 size_t len;
545 size_t size;
546 u32 i;
547
548 memset(header, 0, sizeof (*header));
549
550 snap_count = le32_to_cpu(ondisk->snap_count);
551
552 len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
553 header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
554 if (!header->object_prefix)
555 return -ENOMEM;
556 memcpy(header->object_prefix, ondisk->object_prefix, len);
557 header->object_prefix[len] = '\0';
558
559 if (snap_count) {
560 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
561
562 /* Save a copy of the snapshot names */
563
564 if (snap_names_len > (u64) SIZE_MAX)
565 return -EIO;
566 header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
567 if (!header->snap_names)
568 goto out_err;
569 /*
570 * Note that rbd_dev_v1_header_read() guarantees
571 * the ondisk buffer we're working with has
572 * snap_names_len bytes beyond the end of the
573 * snapshot id array, so this memcpy() is safe.
574 */
575 memcpy(header->snap_names, &ondisk->snaps[snap_count],
576 snap_names_len);
577
578 /* Record each snapshot's size */
579
580 size = snap_count * sizeof (*header->snap_sizes);
581 header->snap_sizes = kmalloc(size, GFP_KERNEL);
582 if (!header->snap_sizes)
583 goto out_err;
584 for (i = 0; i < snap_count; i++)
585 header->snap_sizes[i] =
586 le64_to_cpu(ondisk->snaps[i].image_size);
587 } else {
588 WARN_ON(ondisk->snap_names_len);
589 header->snap_names = NULL;
590 header->snap_sizes = NULL;
591 }
592
593 header->obj_order = ondisk->options.order;
594 header->crypt_type = ondisk->options.crypt_type;
595 header->comp_type = ondisk->options.comp_type;
596
597 /* Allocate and fill in the snapshot context */
598
599 header->image_size = le64_to_cpu(ondisk->image_size);
600 size = sizeof (struct ceph_snap_context);
601 size += snap_count * sizeof (header->snapc->snaps[0]);
602 header->snapc = kzalloc(size, GFP_KERNEL);
603 if (!header->snapc)
604 goto out_err;
605
606 atomic_set(&header->snapc->nref, 1);
607 header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
608 header->snapc->num_snaps = snap_count;
609 for (i = 0; i < snap_count; i++)
610 header->snapc->snaps[i] =
611 le64_to_cpu(ondisk->snaps[i].id);
612
613 return 0;
614
615 out_err:
616 kfree(header->snap_sizes);
617 header->snap_sizes = NULL;
618 kfree(header->snap_names);
619 header->snap_names = NULL;
620 kfree(header->object_prefix);
621 header->object_prefix = NULL;
622
623 return -ENOMEM;
624 }
625
626 static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
627 {
628
629 struct rbd_snap *snap;
630
631 list_for_each_entry(snap, &rbd_dev->snaps, node) {
632 if (!strcmp(snap_name, snap->name)) {
633 rbd_dev->mapping.snap_id = snap->id;
634 rbd_dev->mapping.size = snap->size;
635
636 return 0;
637 }
638 }
639
640 return -ENOENT;
641 }
642
643 static int rbd_dev_set_mapping(struct rbd_device *rbd_dev, char *snap_name)
644 {
645 int ret;
646
647 if (!memcmp(snap_name, RBD_SNAP_HEAD_NAME,
648 sizeof (RBD_SNAP_HEAD_NAME))) {
649 rbd_dev->mapping.snap_id = CEPH_NOSNAP;
650 rbd_dev->mapping.size = rbd_dev->header.image_size;
651 rbd_dev->mapping.snap_exists = false;
652 rbd_dev->mapping.read_only = rbd_dev->rbd_opts.read_only;
653 ret = 0;
654 } else {
655 ret = snap_by_name(rbd_dev, snap_name);
656 if (ret < 0)
657 goto done;
658 rbd_dev->mapping.snap_exists = true;
659 rbd_dev->mapping.read_only = true;
660 }
661 rbd_dev->mapping.snap_name = snap_name;
662 done:
663 return ret;
664 }
665
666 static void rbd_header_free(struct rbd_image_header *header)
667 {
668 kfree(header->object_prefix);
669 header->object_prefix = NULL;
670 kfree(header->snap_sizes);
671 header->snap_sizes = NULL;
672 kfree(header->snap_names);
673 header->snap_names = NULL;
674 ceph_put_snap_context(header->snapc);
675 header->snapc = NULL;
676 }
677
678 static char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
679 {
680 char *name;
681 u64 segment;
682 int ret;
683
684 name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
685 if (!name)
686 return NULL;
687 segment = offset >> rbd_dev->header.obj_order;
688 ret = snprintf(name, RBD_MAX_SEG_NAME_LEN, "%s.%012llx",
689 rbd_dev->header.object_prefix, segment);
690 if (ret < 0 || ret >= RBD_MAX_SEG_NAME_LEN) {
691 pr_err("error formatting segment name for #%llu (%d)\n",
692 segment, ret);
693 kfree(name);
694 name = NULL;
695 }
696
697 return name;
698 }
699
700 static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
701 {
702 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
703
704 return offset & (segment_size - 1);
705 }
706
707 static u64 rbd_segment_length(struct rbd_device *rbd_dev,
708 u64 offset, u64 length)
709 {
710 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
711
712 offset &= segment_size - 1;
713
714 rbd_assert(length <= U64_MAX - offset);
715 if (offset + length > segment_size)
716 length = segment_size - offset;
717
718 return length;
719 }
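/*
 * Example: with the default obj_order of 22 (4 MB objects), an I/O of
 * 4 MB at offset 6 MB starts 2 MB into its segment, so the returned
 * length is truncated to the 2 MB remaining in that segment.
 */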
720
721 static int rbd_get_num_segments(struct rbd_image_header *header,
722 u64 ofs, u64 len)
723 {
724 u64 start_seg;
725 u64 end_seg;
726
727 if (!len)
728 return 0;
729 if (len - 1 > U64_MAX - ofs)
730 return -ERANGE;
731
732 start_seg = ofs >> header->obj_order;
733 end_seg = (ofs + len - 1) >> header->obj_order;
734
735 return end_seg - start_seg + 1;
736 }
737
738 /*
739 * returns the size of an object in the image
740 */
741 static u64 rbd_obj_bytes(struct rbd_image_header *header)
742 {
743 return 1 << header->obj_order;
744 }
745
746 /*
747 * bio helpers
748 */
749
750 static void bio_chain_put(struct bio *chain)
751 {
752 struct bio *tmp;
753
754 while (chain) {
755 tmp = chain;
756 chain = chain->bi_next;
757 bio_put(tmp);
758 }
759 }
760
761 /*
762 * zeros a bio chain, starting at specific offset
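* (used to fill in data the OSDs never supplied: reads that hit a
* nonexistent object, or that come back short)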
763 */
764 static void zero_bio_chain(struct bio *chain, int start_ofs)
765 {
766 struct bio_vec *bv;
767 unsigned long flags;
768 void *buf;
769 int i;
770 int pos = 0;
771
772 while (chain) {
773 bio_for_each_segment(bv, chain, i) {
774 if (pos + bv->bv_len > start_ofs) {
775 int remainder = max(start_ofs - pos, 0);
776 buf = bvec_kmap_irq(bv, &flags);
777 memset(buf + remainder, 0,
778 bv->bv_len - remainder);
779 bvec_kunmap_irq(buf, &flags);
780 }
781 pos += bv->bv_len;
782 }
783
784 chain = chain->bi_next;
785 }
786 }
787
788 /*
789 * bio_chain_clone - clone a chain of bios up to a certain length.
790 * might return a bio_pair that will need to be released.
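* On return *next points at the spot in the original chain (possibly
* the second half of a split bio) from which the next clone should
* continue.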
791 */
792 static struct bio *bio_chain_clone(struct bio **old, struct bio **next,
793 struct bio_pair **bp,
794 int len, gfp_t gfpmask)
795 {
796 struct bio *old_chain = *old;
797 struct bio *new_chain = NULL;
798 struct bio *tail;
799 int total = 0;
800
801 if (*bp) {
802 bio_pair_release(*bp);
803 *bp = NULL;
804 }
805
806 while (old_chain && (total < len)) {
807 struct bio *tmp;
808
809 tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs);
810 if (!tmp)
811 goto err_out;
812 gfpmask &= ~__GFP_WAIT; /* can't wait after the first */
813
814 if (total + old_chain->bi_size > len) {
815 struct bio_pair *bp;
816
817 /*
818 * this split can only happen with a single-page bio;
819 * bio_split() will BUG_ON if this is not the case
820 */
821 dout("bio_chain_clone split! total=%d remaining=%d"
822 "bi_size=%u\n",
823 total, len - total, old_chain->bi_size);
824
825 /* split the bio. We'll release it either in the next
826 call, or it will have to be released outside */
827 bp = bio_split(old_chain, (len - total) / SECTOR_SIZE);
828 if (!bp)
829 goto err_out;
830
831 __bio_clone(tmp, &bp->bio1);
832
833 *next = &bp->bio2;
834 } else {
835 __bio_clone(tmp, old_chain);
836 *next = old_chain->bi_next;
837 }
838
839 tmp->bi_bdev = NULL;
840 tmp->bi_next = NULL;
841 if (new_chain)
842 tail->bi_next = tmp;
843 else
844 new_chain = tmp;
845 tail = tmp;
846 old_chain = old_chain->bi_next;
847
848 total += tmp->bi_size;
849 }
850
851 rbd_assert(total == len);
852
853 *old = old_chain;
854
855 return new_chain;
856
857 err_out:
858 dout("bio_chain_clone with err\n");
859 bio_chain_put(new_chain);
860 return NULL;
861 }
862
863 /*
864 * helpers for osd request op vectors.
865 */
866 static struct ceph_osd_req_op *rbd_create_rw_ops(int num_ops,
867 int opcode, u32 payload_len)
868 {
869 struct ceph_osd_req_op *ops;
870
871 ops = kzalloc(sizeof (*ops) * (num_ops + 1), GFP_NOIO);
872 if (!ops)
873 return NULL;
874
875 ops[0].op = opcode;
876
877 /*
878 * op extent offset and length will be set later on
879 * in calc_raw_layout()
880 */
881 ops[0].payload_len = payload_len;
882
883 return ops;
884 }
885
886 static void rbd_destroy_ops(struct ceph_osd_req_op *ops)
887 {
888 kfree(ops);
889 }
890
891 static void rbd_coll_end_req_index(struct request *rq,
892 struct rbd_req_coll *coll,
893 int index,
894 int ret, u64 len)
895 {
896 struct request_queue *q;
897 int min, max, i;
898
899 dout("rbd_coll_end_req_index %p index %d ret %d len %llu\n",
900 coll, index, ret, (unsigned long long) len);
901
902 if (!rq)
903 return;
904
905 if (!coll) {
906 blk_end_request(rq, ret, len);
907 return;
908 }
909
910 q = rq->q;
911
912 spin_lock_irq(q->queue_lock);
913 coll->status[index].done = 1;
914 coll->status[index].rc = ret;
915 coll->status[index].bytes = len;
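	/*
	 * Sub-requests may finish out of order, but the block layer must
	 * see completions in order; retire only the contiguous run of
	 * finished entries starting at num_done.
	 */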
916 max = min = coll->num_done;
917 while (max < coll->total && coll->status[max].done)
918 max++;
919
920 for (i = min; i < max; i++) {
921 __blk_end_request(rq, coll->status[i].rc,
922 coll->status[i].bytes);
923 coll->num_done++;
924 kref_put(&coll->kref, rbd_coll_release);
925 }
926 spin_unlock_irq(q->queue_lock);
927 }
928
929 static void rbd_coll_end_req(struct rbd_request *req,
930 int ret, u64 len)
931 {
932 rbd_coll_end_req_index(req->rq, req->coll, req->coll_index, ret, len);
933 }
934
935 /*
936 * Send ceph osd request
937 */
938 static int rbd_do_request(struct request *rq,
939 struct rbd_device *rbd_dev,
940 struct ceph_snap_context *snapc,
941 u64 snapid,
942 const char *object_name, u64 ofs, u64 len,
943 struct bio *bio,
944 struct page **pages,
945 int num_pages,
946 int flags,
947 struct ceph_osd_req_op *ops,
948 struct rbd_req_coll *coll,
949 int coll_index,
950 void (*rbd_cb)(struct ceph_osd_request *req,
951 struct ceph_msg *msg),
952 struct ceph_osd_request **linger_req,
953 u64 *ver)
954 {
955 struct ceph_osd_request *req;
956 struct ceph_file_layout *layout;
957 int ret;
958 u64 bno;
959 struct timespec mtime = CURRENT_TIME;
960 struct rbd_request *req_data;
961 struct ceph_osd_request_head *reqhead;
962 struct ceph_osd_client *osdc;
963
964 req_data = kzalloc(sizeof(*req_data), GFP_NOIO);
965 if (!req_data) {
966 if (coll)
967 rbd_coll_end_req_index(rq, coll, coll_index,
968 -ENOMEM, len);
969 return -ENOMEM;
970 }
971
972 if (coll) {
973 req_data->coll = coll;
974 req_data->coll_index = coll_index;
975 }
976
977 dout("rbd_do_request object_name=%s ofs=%llu len=%llu\n", object_name,
978 (unsigned long long) ofs, (unsigned long long) len);
979
980 osdc = &rbd_dev->rbd_client->client->osdc;
981 req = ceph_osdc_alloc_request(osdc, flags, snapc, ops,
982 false, GFP_NOIO, pages, bio);
983 if (!req) {
984 ret = -ENOMEM;
985 goto done_pages;
986 }
987
988 req->r_callback = rbd_cb;
989
990 req_data->rq = rq;
991 req_data->bio = bio;
992 req_data->pages = pages;
993 req_data->len = len;
994
995 req->r_priv = req_data;
996
997 reqhead = req->r_request->front.iov_base;
998 reqhead->snapid = cpu_to_le64(CEPH_NOSNAP);
999
1000 strncpy(req->r_oid, object_name, sizeof(req->r_oid));
1001 req->r_oid_len = strlen(req->r_oid);
1002
1003 layout = &req->r_file_layout;
1004 memset(layout, 0, sizeof(*layout));
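	/* trivial layout: stripe unit == object size and a single stripe,
	   i.e. no striping across objects */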
1005 layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
1006 layout->fl_stripe_count = cpu_to_le32(1);
1007 layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
1008 layout->fl_pg_pool = cpu_to_le32(rbd_dev->pool_id);
1009 ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
1010 req, ops);
1011
1012 ceph_osdc_build_request(req, ofs, &len,
1013 ops,
1014 snapc,
1015 &mtime,
1016 req->r_oid, req->r_oid_len);
1017
1018 if (linger_req) {
1019 ceph_osdc_set_request_linger(osdc, req);
1020 *linger_req = req;
1021 }
1022
1023 ret = ceph_osdc_start_request(osdc, req, false);
1024 if (ret < 0)
1025 goto done_err;
1026
1027 if (!rbd_cb) {
1028 ret = ceph_osdc_wait_request(osdc, req);
1029 if (ver)
1030 *ver = le64_to_cpu(req->r_reassert_version.version);
1031 dout("reassert_ver=%llu\n",
1032 (unsigned long long)
1033 le64_to_cpu(req->r_reassert_version.version));
1034 ceph_osdc_put_request(req);
1035 }
1036 return ret;
1037
1038 done_err:
1039 bio_chain_put(req_data->bio);
1040 ceph_osdc_put_request(req);
1041 done_pages:
1042 rbd_coll_end_req(req_data, ret, len);
1043 kfree(req_data);
1044 return ret;
1045 }
1046
1047 /*
1048 * Ceph osd op callback
1049 */
1050 static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
1051 {
1052 struct rbd_request *req_data = req->r_priv;
1053 struct ceph_osd_reply_head *replyhead;
1054 struct ceph_osd_op *op;
1055 __s32 rc;
1056 u64 bytes;
1057 int read_op;
1058
1059 /* parse reply */
1060 replyhead = msg->front.iov_base;
1061 WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
1062 op = (void *)(replyhead + 1);
1063 rc = le32_to_cpu(replyhead->result);
1064 bytes = le64_to_cpu(op->extent.length);
1065 read_op = (le16_to_cpu(op->op) == CEPH_OSD_OP_READ);
1066
1067 dout("rbd_req_cb bytes=%llu readop=%d rc=%d\n",
1068 (unsigned long long) bytes, read_op, (int) rc);
1069
1070 if (rc == -ENOENT && read_op) {
1071 zero_bio_chain(req_data->bio, 0);
1072 rc = 0;
1073 } else if (rc == 0 && read_op && bytes < req_data->len) {
1074 zero_bio_chain(req_data->bio, bytes);
1075 bytes = req_data->len;
1076 }
1077
1078 rbd_coll_end_req(req_data, rc, bytes);
1079
1080 if (req_data->bio)
1081 bio_chain_put(req_data->bio);
1082
1083 ceph_osdc_put_request(req);
1084 kfree(req_data);
1085 }
1086
1087 static void rbd_simple_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
1088 {
1089 ceph_osdc_put_request(req);
1090 }
1091
1092 /*
1093 * Do a synchronous ceph osd operation
1094 */
1095 static int rbd_req_sync_op(struct rbd_device *rbd_dev,
1096 struct ceph_snap_context *snapc,
1097 u64 snapid,
1098 int flags,
1099 struct ceph_osd_req_op *ops,
1100 const char *object_name,
1101 u64 ofs, u64 inbound_size,
1102 char *inbound,
1103 struct ceph_osd_request **linger_req,
1104 u64 *ver)
1105 {
1106 int ret;
1107 struct page **pages;
1108 int num_pages;
1109
1110 rbd_assert(ops != NULL);
1111
1112 num_pages = calc_pages_for(ofs, inbound_size);
1113 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1114 if (IS_ERR(pages))
1115 return PTR_ERR(pages);
1116
1117 ret = rbd_do_request(NULL, rbd_dev, snapc, snapid,
1118 object_name, ofs, inbound_size, NULL,
1119 pages, num_pages,
1120 flags,
1121 ops,
1122 NULL, 0,
1123 NULL,
1124 linger_req, ver);
1125 if (ret < 0)
1126 goto done;
1127
1128 if ((flags & CEPH_OSD_FLAG_READ) && inbound)
1129 ret = ceph_copy_from_page_vector(pages, inbound, ofs, ret);
1130
1131 done:
1132 ceph_release_page_vector(pages, num_pages);
1133 return ret;
1134 }
1135
1136 /*
1137 * Do an asynchronous ceph osd operation
1138 */
1139 static int rbd_do_op(struct request *rq,
1140 struct rbd_device *rbd_dev,
1141 struct ceph_snap_context *snapc,
1142 u64 snapid,
1143 int opcode, int flags,
1144 u64 ofs, u64 len,
1145 struct bio *bio,
1146 struct rbd_req_coll *coll,
1147 int coll_index)
1148 {
1149 char *seg_name;
1150 u64 seg_ofs;
1151 u64 seg_len;
1152 int ret;
1153 struct ceph_osd_req_op *ops;
1154 u32 payload_len;
1155
1156 seg_name = rbd_segment_name(rbd_dev, ofs);
1157 if (!seg_name)
1158 return -ENOMEM;
1159 seg_len = rbd_segment_length(rbd_dev, ofs, len);
1160 seg_ofs = rbd_segment_offset(rbd_dev, ofs);
1161
1162 payload_len = (flags & CEPH_OSD_FLAG_WRITE ? seg_len : 0);
1163
1164 ret = -ENOMEM;
1165 ops = rbd_create_rw_ops(1, opcode, payload_len);
1166 if (!ops)
1167 goto done;
1168
1169 /* we've taken care of segment sizes earlier when we
1170 cloned the bios. We should never have a segment
1171 truncated at this point */
1172 rbd_assert(seg_len == len);
1173
1174 ret = rbd_do_request(rq, rbd_dev, snapc, snapid,
1175 seg_name, seg_ofs, seg_len,
1176 bio,
1177 NULL, 0,
1178 flags,
1179 ops,
1180 coll, coll_index,
1181 rbd_req_cb, NULL, NULL);
1182
1183 rbd_destroy_ops(ops);
1184 done:
1185 kfree(seg_name);
1186 return ret;
1187 }
1188
1189 /*
1190 * Request async osd write
1191 */
1192 static int rbd_req_write(struct request *rq,
1193 struct rbd_device *rbd_dev,
1194 struct ceph_snap_context *snapc,
1195 u64 ofs, u64 len,
1196 struct bio *bio,
1197 struct rbd_req_coll *coll,
1198 int coll_index)
1199 {
1200 return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP,
1201 CEPH_OSD_OP_WRITE,
1202 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1203 ofs, len, bio, coll, coll_index);
1204 }
1205
1206 /*
1207 * Request async osd read
1208 */
1209 static int rbd_req_read(struct request *rq,
1210 struct rbd_device *rbd_dev,
1211 u64 snapid,
1212 u64 ofs, u64 len,
1213 struct bio *bio,
1214 struct rbd_req_coll *coll,
1215 int coll_index)
1216 {
1217 return rbd_do_op(rq, rbd_dev, NULL,
1218 snapid,
1219 CEPH_OSD_OP_READ,
1220 CEPH_OSD_FLAG_READ,
1221 ofs, len, bio, coll, coll_index);
1222 }
1223
1224 /*
1225 * Request sync osd read
1226 */
1227 static int rbd_req_sync_read(struct rbd_device *rbd_dev,
1228 u64 snapid,
1229 const char *object_name,
1230 u64 ofs, u64 len,
1231 char *buf,
1232 u64 *ver)
1233 {
1234 struct ceph_osd_req_op *ops;
1235 int ret;
1236
1237 ops = rbd_create_rw_ops(1, CEPH_OSD_OP_READ, 0);
1238 if (!ops)
1239 return -ENOMEM;
1240
1241 ret = rbd_req_sync_op(rbd_dev, NULL,
1242 snapid,
1243 CEPH_OSD_FLAG_READ,
1244 ops, object_name, ofs, len, buf, NULL, ver);
1245 rbd_destroy_ops(ops);
1246
1247 return ret;
1248 }
1249
1250 /*
1251 * Request sync osd notify-ack (acknowledge a watch notification)
1252 */
1253 static int rbd_req_sync_notify_ack(struct rbd_device *rbd_dev,
1254 u64 ver,
1255 u64 notify_id)
1256 {
1257 struct ceph_osd_req_op *ops;
1258 int ret;
1259
1260 ops = rbd_create_rw_ops(1, CEPH_OSD_OP_NOTIFY_ACK, 0);
1261 if (!ops)
1262 return -ENOMEM;
1263
1264 ops[0].watch.ver = cpu_to_le64(ver);
1265 ops[0].watch.cookie = notify_id;
1266 ops[0].watch.flag = 0;
1267
1268 ret = rbd_do_request(NULL, rbd_dev, NULL, CEPH_NOSNAP,
1269 rbd_dev->header_name, 0, 0, NULL,
1270 NULL, 0,
1271 CEPH_OSD_FLAG_READ,
1272 ops,
1273 NULL, 0,
1274 rbd_simple_req_cb, NULL, NULL);
1275
1276 rbd_destroy_ops(ops);
1277 return ret;
1278 }
1279
1280 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
1281 {
1282 struct rbd_device *rbd_dev = (struct rbd_device *)data;
1283 u64 hver;
1284 int rc;
1285
1286 if (!rbd_dev)
1287 return;
1288
1289 dout("rbd_watch_cb %s notify_id=%llu opcode=%u\n",
1290 rbd_dev->header_name, (unsigned long long) notify_id,
1291 (unsigned int) opcode);
1292 rc = rbd_refresh_header(rbd_dev, &hver);
1293 if (rc)
1294 pr_warning(RBD_DRV_NAME "%d got notification but failed to "
1295 " update snaps: %d\n", rbd_dev->major, rc);
1296
1297 rbd_req_sync_notify_ack(rbd_dev, hver, notify_id);
1298 }
1299
1300 /*
1301 * Request sync osd watch
1302 */
1303 static int rbd_req_sync_watch(struct rbd_device *rbd_dev)
1304 {
1305 struct ceph_osd_req_op *ops;
1306 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1307 int ret;
1308
1309 ops = rbd_create_rw_ops(1, CEPH_OSD_OP_WATCH, 0);
1310 if (!ops)
1311 return -ENOMEM;
1312
1313 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, 0,
1314 (void *)rbd_dev, &rbd_dev->watch_event);
1315 if (ret < 0)
1316 goto fail;
1317
1318 ops[0].watch.ver = cpu_to_le64(rbd_dev->header.obj_version);
1319 ops[0].watch.cookie = cpu_to_le64(rbd_dev->watch_event->cookie);
1320 ops[0].watch.flag = 1;
1321
1322 ret = rbd_req_sync_op(rbd_dev, NULL,
1323 CEPH_NOSNAP,
1324 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1325 ops,
1326 rbd_dev->header_name,
1327 0, 0, NULL,
1328 &rbd_dev->watch_request, NULL);
1329
1330 if (ret < 0)
1331 goto fail_event;
1332
1333 rbd_destroy_ops(ops);
1334 return 0;
1335
1336 fail_event:
1337 ceph_osdc_cancel_event(rbd_dev->watch_event);
1338 rbd_dev->watch_event = NULL;
1339 fail:
1340 rbd_destroy_ops(ops);
1341 return ret;
1342 }
1343
1344 /*
1345 * Request sync osd unwatch
1346 */
1347 static int rbd_req_sync_unwatch(struct rbd_device *rbd_dev)
1348 {
1349 struct ceph_osd_req_op *ops;
1350 int ret;
1351
1352 ops = rbd_create_rw_ops(1, CEPH_OSD_OP_WATCH, 0);
1353 if (!ops)
1354 return -ENOMEM;
1355
1356 ops[0].watch.ver = 0;
1357 ops[0].watch.cookie = cpu_to_le64(rbd_dev->watch_event->cookie);
1358 ops[0].watch.flag = 0;
1359
1360 ret = rbd_req_sync_op(rbd_dev, NULL,
1361 CEPH_NOSNAP,
1362 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1363 ops,
1364 rbd_dev->header_name,
1365 0, 0, NULL, NULL, NULL);
1366
1367
1368 rbd_destroy_ops(ops);
1369 ceph_osdc_cancel_event(rbd_dev->watch_event);
1370 rbd_dev->watch_event = NULL;
1371 return ret;
1372 }
1373
1374 /*
1375 * Synchronous osd object method call
1376 */
1377 static int rbd_req_sync_exec(struct rbd_device *rbd_dev,
1378 const char *object_name,
1379 const char *class_name,
1380 const char *method_name,
1381 const char *outbound,
1382 size_t outbound_size,
1383 char *inbound,
1384 size_t inbound_size,
1385 int flags,
1386 u64 *ver)
1387 {
1388 struct ceph_osd_req_op *ops;
1389 int class_name_len = strlen(class_name);
1390 int method_name_len = strlen(method_name);
1391 int payload_size;
1392 int ret;
1393
1394 /*
1395 * Any input parameters required by the method we're calling
1396 * will be sent along with the class and method names as
1397 * part of the message payload. That data and its size are
1398 * supplied via the indata and indata_len fields (named from
1399 * the perspective of the server side) in the OSD request
1400 * operation.
1401 */
1402 payload_size = class_name_len + method_name_len + outbound_size;
1403 ops = rbd_create_rw_ops(1, CEPH_OSD_OP_CALL, payload_size);
1404 if (!ops)
1405 return -ENOMEM;
1406
1407 ops[0].cls.class_name = class_name;
1408 ops[0].cls.class_len = (__u8) class_name_len;
1409 ops[0].cls.method_name = method_name;
1410 ops[0].cls.method_len = (__u8) method_name_len;
1411 ops[0].cls.argc = 0;
1412 ops[0].cls.indata = outbound;
1413 ops[0].cls.indata_len = outbound_size;
1414
1415 ret = rbd_req_sync_op(rbd_dev, NULL,
1416 CEPH_NOSNAP,
1417 flags, ops,
1418 object_name, 0, inbound_size, inbound,
1419 NULL, ver);
1420
1421 rbd_destroy_ops(ops);
1422
1423 dout("cls_exec returned %d\n", ret);
1424 return ret;
1425 }
1426
1427 static struct rbd_req_coll *rbd_alloc_coll(int num_reqs)
1428 {
1429 struct rbd_req_coll *coll =
1430 kzalloc(sizeof(struct rbd_req_coll) +
1431 sizeof(struct rbd_req_status) * num_reqs,
1432 GFP_ATOMIC);
1433
1434 if (!coll)
1435 return NULL;
1436 coll->total = num_reqs;
1437 kref_init(&coll->kref);
1438 return coll;
1439 }
1440
1441 /*
1442 * block device queue callback
1443 */
1444 static void rbd_rq_fn(struct request_queue *q)
1445 {
1446 struct rbd_device *rbd_dev = q->queuedata;
1447 struct request *rq;
1448 struct bio_pair *bp = NULL;
1449
1450 while ((rq = blk_fetch_request(q))) {
1451 struct bio *bio;
1452 struct bio *rq_bio, *next_bio = NULL;
1453 bool do_write;
1454 unsigned int size;
1455 u64 op_size = 0;
1456 u64 ofs;
1457 int num_segs, cur_seg = 0;
1458 struct rbd_req_coll *coll;
1459 struct ceph_snap_context *snapc;
1460
1461 dout("fetched request\n");
1462
1463 /* filter out block requests we don't understand */
1464 if ((rq->cmd_type != REQ_TYPE_FS)) {
1465 __blk_end_request_all(rq, 0);
1466 continue;
1467 }
1468
1469 /* deduce our operation (read, write) */
1470 do_write = (rq_data_dir(rq) == WRITE);
1471
1472 size = blk_rq_bytes(rq);
1473 ofs = blk_rq_pos(rq) * SECTOR_SIZE;
1474 rq_bio = rq->bio;
1475 if (do_write && rbd_dev->mapping.read_only) {
1476 __blk_end_request_all(rq, -EROFS);
1477 continue;
1478 }
1479
1480 spin_unlock_irq(q->queue_lock);
1481
1482 down_read(&rbd_dev->header_rwsem);
1483
1484 if (rbd_dev->mapping.snap_id != CEPH_NOSNAP &&
1485 !rbd_dev->mapping.snap_exists) {
1486 up_read(&rbd_dev->header_rwsem);
1487 dout("request for non-existent snapshot");
1488 spin_lock_irq(q->queue_lock);
1489 __blk_end_request_all(rq, -ENXIO);
1490 continue;
1491 }
1492
1493 snapc = ceph_get_snap_context(rbd_dev->header.snapc);
1494
1495 up_read(&rbd_dev->header_rwsem);
1496
1497 dout("%s 0x%x bytes at 0x%llx\n",
1498 do_write ? "write" : "read",
1499 size, (unsigned long long) blk_rq_pos(rq) * SECTOR_SIZE);
1500
1501 num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size);
1502 if (num_segs <= 0) {
1503 spin_lock_irq(q->queue_lock);
1504 __blk_end_request_all(rq, num_segs);
1505 ceph_put_snap_context(snapc);
1506 continue;
1507 }
1508 coll = rbd_alloc_coll(num_segs);
1509 if (!coll) {
1510 spin_lock_irq(q->queue_lock);
1511 __blk_end_request_all(rq, -ENOMEM);
1512 ceph_put_snap_context(snapc);
1513 continue;
1514 }
1515
1516 do {
1517 /* a bio clone to be passed down to OSD req */
1518 dout("rq->bio->bi_vcnt=%hu\n", rq->bio->bi_vcnt);
1519 op_size = rbd_segment_length(rbd_dev, ofs, size);
1520 kref_get(&coll->kref);
1521 bio = bio_chain_clone(&rq_bio, &next_bio, &bp,
1522 op_size, GFP_ATOMIC);
1523 if (!bio) {
1524 rbd_coll_end_req_index(rq, coll, cur_seg,
1525 -ENOMEM, op_size);
1526 goto next_seg;
1527 }
1528
1529
1530 /* init OSD command: write or read */
1531 if (do_write)
1532 rbd_req_write(rq, rbd_dev,
1533 snapc,
1534 ofs,
1535 op_size, bio,
1536 coll, cur_seg);
1537 else
1538 rbd_req_read(rq, rbd_dev,
1539 rbd_dev->mapping.snap_id,
1540 ofs,
1541 op_size, bio,
1542 coll, cur_seg);
1543
1544 next_seg:
1545 size -= op_size;
1546 ofs += op_size;
1547
1548 cur_seg++;
1549 rq_bio = next_bio;
1550 } while (size > 0);
1551 kref_put(&coll->kref, rbd_coll_release);
1552
1553 if (bp)
1554 bio_pair_release(bp);
1555 spin_lock_irq(q->queue_lock);
1556
1557 ceph_put_snap_context(snapc);
1558 }
1559 }
1560
1561 /*
1562 * a queue callback. Makes sure that we don't create a bio that spans across
1563 * multiple osd objects. The one exception is a single-page bio,
1564 * which we handle later in bio_chain_clone()
1565 */
1566 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
1567 struct bio_vec *bvec)
1568 {
1569 struct rbd_device *rbd_dev = q->queuedata;
1570 unsigned int chunk_sectors;
1571 sector_t sector;
1572 unsigned int bio_sectors;
1573 int max;
1574
1575 chunk_sectors = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
1576 sector = bmd->bi_sector + get_start_sect(bmd->bi_bdev);
1577 bio_sectors = bmd->bi_size >> SECTOR_SHIFT;
1578
1579 max = (chunk_sectors - ((sector & (chunk_sectors - 1))
1580 + bio_sectors)) << SECTOR_SHIFT;
1581 if (max < 0)
1582 max = 0; /* bio_add cannot handle a negative return */
1583 if (max <= bvec->bv_len && bio_sectors == 0)
1584 return bvec->bv_len;
1585 return max;
1586 }
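/*
 * Example: with 4 MB objects chunk_sectors is 8192.  A bio starting two
 * sectors before an object boundary may only grow by 1 KB; an empty bio
 * (bio_sectors == 0) is always allowed its first bvec, though, which is
 * the single-page exception bio_chain_clone splits later.
 */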
1587
1588 static void rbd_free_disk(struct rbd_device *rbd_dev)
1589 {
1590 struct gendisk *disk = rbd_dev->disk;
1591
1592 if (!disk)
1593 return;
1594
1595 if (disk->flags & GENHD_FL_UP)
1596 del_gendisk(disk);
1597 if (disk->queue)
1598 blk_cleanup_queue(disk->queue);
1599 put_disk(disk);
1600 }
1601
1602 /*
1603 * Read the complete header for the given rbd device.
1604 *
1605 * Returns a pointer to a dynamically-allocated buffer containing
1606 * the complete and validated header. Caller can pass the address
1607 * of a variable that will be filled in with the version of the
1608 * header object at the time it was read.
1609 *
1610 * Returns a pointer-coded errno if a failure occurs.
1611 */
1612 static struct rbd_image_header_ondisk *
1613 rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
1614 {
1615 struct rbd_image_header_ondisk *ondisk = NULL;
1616 u32 snap_count = 0;
1617 u64 names_size = 0;
1618 u32 want_count;
1619 int ret;
1620
1621 /*
1622 * The complete header will include an array of its 64-bit
1623 * snapshot ids, followed by the names of those snapshots as
1624 * a contiguous block of NUL-terminated strings. Note that
1625 * the number of snapshots could change by the time we read
1626 * it in, in which case we re-read it.
1627 */
1628 do {
1629 size_t size;
1630
1631 kfree(ondisk);
1632
1633 size = sizeof (*ondisk);
1634 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
1635 size += names_size;
1636 ondisk = kmalloc(size, GFP_KERNEL);
1637 if (!ondisk)
1638 return ERR_PTR(-ENOMEM);
1639
1640 ret = rbd_req_sync_read(rbd_dev, CEPH_NOSNAP,
1641 rbd_dev->header_name,
1642 0, size,
1643 (char *) ondisk, version);
1644
1645 if (ret < 0)
1646 goto out_err;
1647 if (WARN_ON((size_t) ret < size)) {
1648 ret = -ENXIO;
1649 pr_warning("short header read for image %s"
1650 " (want %zd got %d)\n",
1651 rbd_dev->image_name, size, ret);
1652 goto out_err;
1653 }
1654 if (!rbd_dev_ondisk_valid(ondisk)) {
1655 ret = -ENXIO;
1656 pr_warning("invalid header for image %s\n",
1657 rbd_dev->image_name);
1658 goto out_err;
1659 }
1660
1661 names_size = le64_to_cpu(ondisk->snap_names_len);
1662 want_count = snap_count;
1663 snap_count = le32_to_cpu(ondisk->snap_count);
1664 } while (snap_count != want_count);
1665
1666 return ondisk;
1667
1668 out_err:
1669 kfree(ondisk);
1670
1671 return ERR_PTR(ret);
1672 }
1673
1674 /*
1675 * reload the on-disk header
1676 */
1677 static int rbd_read_header(struct rbd_device *rbd_dev,
1678 struct rbd_image_header *header)
1679 {
1680 struct rbd_image_header_ondisk *ondisk;
1681 u64 ver = 0;
1682 int ret;
1683
1684 ondisk = rbd_dev_v1_header_read(rbd_dev, &ver);
1685 if (IS_ERR(ondisk))
1686 return PTR_ERR(ondisk);
1687 ret = rbd_header_from_disk(header, ondisk);
1688 if (ret >= 0)
1689 header->obj_version = ver;
1690 kfree(ondisk);
1691
1692 return ret;
1693 }
1694
1695 static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
1696 {
1697 struct rbd_snap *snap;
1698 struct rbd_snap *next;
1699
1700 list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
1701 __rbd_remove_snap_dev(snap);
1702 }
1703
1704 /*
1705 * re-read the on-disk header and bring the in-memory image state up to date
1706 */
1707 static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
1708 {
1709 int ret;
1710 struct rbd_image_header h;
1711
1712 ret = rbd_read_header(rbd_dev, &h);
1713 if (ret < 0)
1714 return ret;
1715
1716 down_write(&rbd_dev->header_rwsem);
1717
1718 /* resized? */
1719 if (rbd_dev->mapping.snap_id == CEPH_NOSNAP) {
1720 sector_t size = (sector_t) h.image_size / SECTOR_SIZE;
1721
1722 if (size != (sector_t) rbd_dev->mapping.size) {
1723 dout("setting size to %llu sectors",
1724 (unsigned long long) size);
1725 rbd_dev->mapping.size = (u64) size;
1726 set_capacity(rbd_dev->disk, size);
1727 }
1728 }
1729
1730 /* rbd_dev->header.object_prefix shouldn't change */
1731 kfree(rbd_dev->header.snap_sizes);
1732 kfree(rbd_dev->header.snap_names);
1733 /* osd requests may still refer to snapc */
1734 ceph_put_snap_context(rbd_dev->header.snapc);
1735
1736 if (hver)
1737 *hver = h.obj_version;
1738 rbd_dev->header.obj_version = h.obj_version;
1739 rbd_dev->header.image_size = h.image_size;
1740 rbd_dev->header.snapc = h.snapc;
1741 rbd_dev->header.snap_names = h.snap_names;
1742 rbd_dev->header.snap_sizes = h.snap_sizes;
1743 /* Free the extra copy of the object prefix */
1744 WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix));
1745 kfree(h.object_prefix);
1746
1747 ret = rbd_dev_snaps_update(rbd_dev);
1748 if (!ret)
1749 ret = rbd_dev_snaps_register(rbd_dev);
1750
1751 up_write(&rbd_dev->header_rwsem);
1752
1753 return ret;
1754 }
1755
1756 static int rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
1757 {
1758 int ret;
1759
1760 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1761 ret = __rbd_refresh_header(rbd_dev, hver);
1762 mutex_unlock(&ctl_mutex);
1763
1764 return ret;
1765 }
1766
1767 static int rbd_init_disk(struct rbd_device *rbd_dev)
1768 {
1769 struct gendisk *disk;
1770 struct request_queue *q;
1771 u64 segment_size;
1772
1773 /* create gendisk info */
1774 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
1775 if (!disk)
1776 return -ENOMEM;
1777
1778 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
1779 rbd_dev->dev_id);
1780 disk->major = rbd_dev->major;
1781 disk->first_minor = 0;
1782 disk->fops = &rbd_bd_ops;
1783 disk->private_data = rbd_dev;
1784
1785 /* init rq */
1786 q = blk_init_queue(rbd_rq_fn, &rbd_dev->lock);
1787 if (!q)
1788 goto out_disk;
1789
1790 /* We use the default size, but let's be explicit about it. */
1791 blk_queue_physical_block_size(q, SECTOR_SIZE);
1792
1793 /* set io sizes to object size */
1794 segment_size = rbd_obj_bytes(&rbd_dev->header);
1795 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
1796 blk_queue_max_segment_size(q, segment_size);
1797 blk_queue_io_min(q, segment_size);
1798 blk_queue_io_opt(q, segment_size);
1799
1800 blk_queue_merge_bvec(q, rbd_merge_bvec);
1801 disk->queue = q;
1802
1803 q->queuedata = rbd_dev;
1804
1805 rbd_dev->disk = disk;
1806
1807 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
1808
1809 return 0;
1810 out_disk:
1811 put_disk(disk);
1812
1813 return -ENOMEM;
1814 }
1815
1816 /*
1817 sysfs
1818 */
1819
1820 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
1821 {
1822 return container_of(dev, struct rbd_device, dev);
1823 }
1824
1825 static ssize_t rbd_size_show(struct device *dev,
1826 struct device_attribute *attr, char *buf)
1827 {
1828 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
1829 sector_t size;
1830
1831 down_read(&rbd_dev->header_rwsem);
1832 size = get_capacity(rbd_dev->disk);
1833 up_read(&rbd_dev->header_rwsem);
1834
1835 return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE);
1836 }
1837
1838 static ssize_t rbd_major_show(struct device *dev,
1839 struct device_attribute *attr, char *buf)
1840 {
1841 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
1842
1843 return sprintf(buf, "%d\n", rbd_dev->major);
1844 }
1845
1846 static ssize_t rbd_client_id_show(struct device *dev,
1847 struct device_attribute *attr, char *buf)
1848 {
1849 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
1850
1851 return sprintf(buf, "client%lld\n",
1852 ceph_client_id(rbd_dev->rbd_client->client));
1853 }
1854
1855 static ssize_t rbd_pool_show(struct device *dev,
1856 struct device_attribute *attr, char *buf)
1857 {
1858 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
1859
1860 return sprintf(buf, "%s\n", rbd_dev->pool_name);
1861 }
1862
1863 static ssize_t rbd_pool_id_show(struct device *dev,
1864 struct device_attribute *attr, char *buf)
1865 {
1866 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
1867
1868 return sprintf(buf, "%d\n", rbd_dev->pool_id);
1869 }
1870
1871 static ssize_t rbd_name_show(struct device *dev,
1872 struct device_attribute *attr, char *buf)
1873 {
1874 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
1875
1876 return sprintf(buf, "%s\n", rbd_dev->image_name);
1877 }
1878
1879 static ssize_t rbd_image_id_show(struct device *dev,
1880 struct device_attribute *attr, char *buf)
1881 {
1882 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
1883
1884 return sprintf(buf, "%s\n", rbd_dev->image_id);
1885 }
1886
1887 static ssize_t rbd_snap_show(struct device *dev,
1888 struct device_attribute *attr,
1889 char *buf)
1890 {
1891 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
1892
1893 return sprintf(buf, "%s\n", rbd_dev->mapping.snap_name);
1894 }
1895
1896 static ssize_t rbd_image_refresh(struct device *dev,
1897 struct device_attribute *attr,
1898 const char *buf,
1899 size_t size)
1900 {
1901 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
1902 int ret;
1903
1904 ret = rbd_refresh_header(rbd_dev, NULL);
1905
1906 return ret < 0 ? ret : size;
1907 }
1908
1909 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
1910 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
1911 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
1912 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
1913 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
1914 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
1915 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
1916 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
1917 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
1918
1919 static struct attribute *rbd_attrs[] = {
1920 &dev_attr_size.attr,
1921 &dev_attr_major.attr,
1922 &dev_attr_client_id.attr,
1923 &dev_attr_pool.attr,
1924 &dev_attr_pool_id.attr,
1925 &dev_attr_name.attr,
1926 &dev_attr_image_id.attr,
1927 &dev_attr_current_snap.attr,
1928 &dev_attr_refresh.attr,
1929 NULL
1930 };
1931
1932 static struct attribute_group rbd_attr_group = {
1933 .attrs = rbd_attrs,
1934 };
1935
1936 static const struct attribute_group *rbd_attr_groups[] = {
1937 &rbd_attr_group,
1938 NULL
1939 };
1940
1941 static void rbd_sysfs_dev_release(struct device *dev)
1942 {
1943 }
1944
1945 static struct device_type rbd_device_type = {
1946 .name = "rbd",
1947 .groups = rbd_attr_groups,
1948 .release = rbd_sysfs_dev_release,
1949 };
1950
1951
1952 /*
1953 sysfs - snapshots
1954 */
1955
1956 static ssize_t rbd_snap_size_show(struct device *dev,
1957 struct device_attribute *attr,
1958 char *buf)
1959 {
1960 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
1961
1962 return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
1963 }
1964
1965 static ssize_t rbd_snap_id_show(struct device *dev,
1966 struct device_attribute *attr,
1967 char *buf)
1968 {
1969 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
1970
1971 return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
1972 }
1973
1974 static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
1975 static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
1976
1977 static struct attribute *rbd_snap_attrs[] = {
1978 &dev_attr_snap_size.attr,
1979 &dev_attr_snap_id.attr,
1980 NULL,
1981 };
1982
1983 static struct attribute_group rbd_snap_attr_group = {
1984 .attrs = rbd_snap_attrs,
1985 };
1986
1987 static void rbd_snap_dev_release(struct device *dev)
1988 {
1989 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
1990 kfree(snap->name);
1991 kfree(snap);
1992 }
1993
1994 static const struct attribute_group *rbd_snap_attr_groups[] = {
1995 &rbd_snap_attr_group,
1996 NULL
1997 };
1998
1999 static struct device_type rbd_snap_device_type = {
2000 .groups = rbd_snap_attr_groups,
2001 .release = rbd_snap_dev_release,
2002 };
2003
2004 static bool rbd_snap_registered(struct rbd_snap *snap)
2005 {
2006 bool ret = snap->dev.type == &rbd_snap_device_type;
2007 bool reg = device_is_registered(&snap->dev);
2008
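	/* dev.type is assigned at registration time, so the two must agree */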
2009 rbd_assert(!ret ^ reg);
2010
2011 return ret;
2012 }
2013
2014 static void __rbd_remove_snap_dev(struct rbd_snap *snap)
2015 {
2016 list_del(&snap->node);
2017 if (device_is_registered(&snap->dev))
2018 device_unregister(&snap->dev);
2019 }
2020
2021 static int rbd_register_snap_dev(struct rbd_snap *snap,
2022 struct device *parent)
2023 {
2024 struct device *dev = &snap->dev;
2025 int ret;
2026
2027 dev->type = &rbd_snap_device_type;
2028 dev->parent = parent;
2029 dev->release = rbd_snap_dev_release;
2030 dev_set_name(dev, "snap_%s", snap->name);
2031 dout("%s: registering device for snapshot %s\n", __func__, snap->name);
2032
2033 ret = device_register(dev);
2034
2035 return ret;
2036 }
2037
2038 static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev,
2039 int i, const char *name)
2040 {
2041 struct rbd_snap *snap;
2042 int ret;
2043
2044 snap = kzalloc(sizeof (*snap), GFP_KERNEL);
2045 if (!snap)
2046 return ERR_PTR(-ENOMEM);
2047
2048 ret = -ENOMEM;
2049 snap->name = kstrdup(name, GFP_KERNEL);
2050 if (!snap->name)
2051 goto err;
2052
2053 snap->size = rbd_dev->header.snap_sizes[i];
2054 snap->id = rbd_dev->header.snapc->snaps[i];
2055
2056 return snap;
2057
2058 err:
2059 kfree(snap->name);
2060 kfree(snap);
2061
2062 return ERR_PTR(ret);
2063 }
2064
2065 /*
2066 * Scan the rbd device's current snapshot list and compare it to the
2067 * newly-received snapshot context. Remove any existing snapshots
2068 * not present in the new snapshot context. Add a new snapshot for
2069 * any snapshots in the snapshot context not in the current list.
2070 * And verify there are no changes to snapshots we already know
2071 * about.
2072 *
2073 * Assumes the snapshots in the snapshot context are sorted by
2074 * snapshot id, highest id first. (Snapshots in the rbd_dev's list
2075 * are also maintained in that order.)
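*
* Example: a new context with ids (12, 5, 2) against an existing list
* (12, 8, 2): 12 matches; 8 is absent from the new context, so its
* device is removed; 5 is new and is inserted before 2; 2 matches.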
2076 */
2077 static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
2078 {
2079 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
2080 const u32 snap_count = snapc->num_snaps;
2081 char *snap_name = rbd_dev->header.snap_names;
2082 struct list_head *head = &rbd_dev->snaps;
2083 struct list_head *links = head->next;
2084 u32 index = 0;
2085
2086 dout("%s: snap count is %u\n", __func__, (unsigned int) snap_count);
2087 while (index < snap_count || links != head) {
2088 u64 snap_id;
2089 struct rbd_snap *snap;
2090
2091 snap_id = index < snap_count ? snapc->snaps[index]
2092 : CEPH_NOSNAP;
2093 snap = links != head ? list_entry(links, struct rbd_snap, node)
2094 : NULL;
2095 rbd_assert(!snap || snap->id != CEPH_NOSNAP);
2096
2097 if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
2098 struct list_head *next = links->next;
2099
2100 /* Existing snapshot not in the new snap context */
2101
2102 if (rbd_dev->mapping.snap_id == snap->id)
2103 rbd_dev->mapping.snap_exists = false;
2104 __rbd_remove_snap_dev(snap);
2105 dout("%ssnap id %llu has been removed\n",
2106 rbd_dev->mapping.snap_id == snap->id ?
2107 "mapped " : "",
2108 (unsigned long long) snap->id);
2109
2110 /* Done with this list entry; advance */
2111
2112 links = next;
2113 continue;
2114 }
2115
2116 dout("entry %u: snap_id = %llu\n", (unsigned int) snap_count,
2117 (unsigned long long) snap_id);
2118 if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
2119 struct rbd_snap *new_snap;
2120
2121 /* We haven't seen this snapshot before */
2122
2123 new_snap = __rbd_add_snap_dev(rbd_dev, index,
2124 snap_name);
2125 if (IS_ERR(new_snap)) {
2126 int err = PTR_ERR(new_snap);
2127
2128 dout(" failed to add dev, error %d\n", err);
2129
2130 return err;
2131 }
2132
2133 /* New goes before existing, or at end of list */
2134
2135 dout(" added dev%s\n", snap ? "" : " at end\n");
2136 if (snap)
2137 list_add_tail(&new_snap->node, &snap->node);
2138 else
2139 list_add_tail(&new_snap->node, head);
2140 } else {
2141 /* Already have this one */
2142
2143 dout(" already present\n");
2144
2145 rbd_assert(snap->size ==
2146 rbd_dev->header.snap_sizes[index]);
2147 rbd_assert(!strcmp(snap->name, snap_name));
2148
2149 /* Done with this list entry; advance */
2150
2151 links = links->next;
2152 }
2153
2154 /* Advance to the next entry in the snapshot context */
2155
2156 index++;
2157 snap_name += strlen(snap_name) + 1;
2158 }
2159 dout("%s: done\n", __func__);
2160
2161 return 0;
2162 }
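
/*
 * Illustration only (not driver code): the loop above is a two-cursor
 * merge of sequences sorted by descending snapshot id.  A minimal
 * sketch of the same three-way decision, with the hypothetical names
 * new_ids/old_ids standing in for the snap context and the snap list:
 */
static void __maybe_unused snap_merge_sketch(const u64 *new_ids, u32 new_cnt,
					     const u64 *old_ids, u32 old_cnt)
{
	u32 i = 0;	/* cursor into the newly-received context */
	u32 j = 0;	/* cursor into our existing (old) list */

	while (i < new_cnt || j < old_cnt) {
		if (i >= new_cnt || (j < old_cnt && old_ids[j] > new_ids[i])) {
			/* old_ids[j] is absent from the new context: remove */
			j++;
		} else if (j >= old_cnt || old_ids[j] < new_ids[i]) {
			/* new_ids[i] is not in our list yet: insert it here */
			i++;
		} else {
			/* same id on both sides: already known, keep it */
			i++;
			j++;
		}
	}
}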
2163
2164 /*
2165 * Scan the list of snapshots and register the devices for any that
2166 * have not already been registered.
2167 */
2168 static int rbd_dev_snaps_register(struct rbd_device *rbd_dev)
2169 {
2170 struct rbd_snap *snap;
2171 int ret = 0;
2172
2173 dout("%s called\n", __func__);
2174 if (WARN_ON(!device_is_registered(&rbd_dev->dev)))
2175 return -EIO;
2176
2177 list_for_each_entry(snap, &rbd_dev->snaps, node) {
2178 if (!rbd_snap_registered(snap)) {
2179 ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
2180 if (ret < 0)
2181 break;
2182 }
2183 }
2184 dout("%s: returning %d\n", __func__, ret);
2185
2186 return ret;
2187 }
2188
2189 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
2190 {
2191 struct device *dev;
2192 int ret;
2193
2194 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2195
2196 dev = &rbd_dev->dev;
2197 dev->bus = &rbd_bus_type;
2198 dev->type = &rbd_device_type;
2199 dev->parent = &rbd_root_dev;
2200 dev->release = rbd_dev_release;
2201 dev_set_name(dev, "%d", rbd_dev->dev_id);
2202 ret = device_register(dev);
2203
2204 mutex_unlock(&ctl_mutex);
2205
2206 return ret;
2207 }
2208
2209 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
2210 {
2211 device_unregister(&rbd_dev->dev);
2212 }
2213
2214 static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
2215 {
2216 int ret, rc;
2217
2218 do {
2219 ret = rbd_req_sync_watch(rbd_dev);
2220 if (ret == -ERANGE) {
2221 rc = rbd_refresh_header(rbd_dev, NULL);
2222 if (rc < 0)
2223 return rc;
2224 }
2225 } while (ret == -ERANGE);
2226
2227 return ret;
2228 }
2229
2230 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
2231
2232 /*
2233 * Get a unique rbd identifier for the given new rbd_dev, and add
2234 * the rbd_dev to the global list. The minimum rbd id is 1.
2235 */
2236 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
2237 {
2238 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
2239
2240 spin_lock(&rbd_dev_list_lock);
2241 list_add_tail(&rbd_dev->node, &rbd_dev_list);
2242 spin_unlock(&rbd_dev_list_lock);
2243 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
2244 (unsigned long long) rbd_dev->dev_id);
2245 }
2246
2247 /*
2248 * Remove an rbd_dev from the global list, and record that its
2249 * identifier is no longer in use.
2250 */
2251 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
2252 {
2253 struct list_head *tmp;
2254 int rbd_id = rbd_dev->dev_id;
2255 int max_id;
2256
2257 rbd_assert(rbd_id > 0);
2258
2259 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
2260 (unsigned long long) rbd_dev->dev_id);
2261 spin_lock(&rbd_dev_list_lock);
2262 list_del_init(&rbd_dev->node);
2263
2264 /*
2265 * If the id being "put" is not the current maximum, there
2266 * is nothing special we need to do.
2267 */
2268 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
2269 spin_unlock(&rbd_dev_list_lock);
2270 return;
2271 }
2272
2273 /*
2274 * We need to update the current maximum id. Search the
2275 * list to find out what it is. We're more likely to find
2276 * the maximum at the end, so search the list backward.
2277 */
2278 max_id = 0;
2279 list_for_each_prev(tmp, &rbd_dev_list) {
2280 struct rbd_device *rbd_dev;
2281
2282 rbd_dev = list_entry(tmp, struct rbd_device, node);
2283 		if (rbd_dev->dev_id > max_id)
2284 			max_id = rbd_dev->dev_id;
2285 }
2286 spin_unlock(&rbd_dev_list_lock);
2287
2288 /*
2289 * The max id could have been updated by rbd_dev_id_get(), in
2290 * which case it now accurately reflects the new maximum.
2291 * Be careful not to overwrite the maximum value in that
2292 * case.
2293 */
2294 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
2295 dout(" max dev id has been reset\n");
2296 }
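
/*
 * Illustration only: the interleaving the atomic64_cmpxchg() above
 * guards against.  Suppose id 3 is being put while another thread
 * concurrently gets a new id:
 *
 *	CPU A (putting id 3)		CPU B (getting an id)
 *	max_id recomputed as 2
 *					dev_id = atomic64_inc_return() = 4
 *	cmpxchg(&max, 3, 2) fails
 *
 * The exchange fails because the counter now holds 4, not 3, so CPU
 * B's larger id survives.  A plain atomic64_set(&rbd_dev_id_max,
 * max_id) here would move the counter back to 2 and eventually hand
 * out id 4 a second time.
 */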
2297
2298 /*
2299 * Skips over white space at *buf, and updates *buf to point to the
2300 * first found non-space character (if any). Returns the length of
2301 * the token (string of non-white space characters) found. Note
2302 * that *buf must be terminated with '\0'.
2303 */
2304 static inline size_t next_token(const char **buf)
2305 {
2306 /*
2307 * These are the characters that produce nonzero for
2308 * isspace() in the "C" and "POSIX" locales.
2309 */
2310 const char *spaces = " \f\n\r\t\v";
2311
2312 *buf += strspn(*buf, spaces); /* Find start of token */
2313
2314 return strcspn(*buf, spaces); /* Return token length */
2315 }
2316
2317 /*
2318 * Finds the next token in *buf, and if the provided token buffer is
2319 * big enough, copies the found token into it. The result, if
2320 * copied, is guaranteed to be terminated with '\0'. Note that *buf
2321 * must be terminated with '\0' on entry.
2322 *
2323 * Returns the length of the token found (not including the '\0').
2324 * Return value will be 0 if no token is found, and it will be >=
2325 * token_size if the token would not fit.
2326 *
2327 * The *buf pointer will be updated to point beyond the end of the
2328 * found token. Note that this occurs even if the token buffer is
2329 * too small to hold it.
2330 */
2331 static inline size_t copy_token(const char **buf,
2332 char *token,
2333 size_t token_size)
2334 {
2335 size_t len;
2336
2337 len = next_token(buf);
2338 if (len < token_size) {
2339 memcpy(token, *buf, len);
2340 *(token + len) = '\0';
2341 }
2342 *buf += len;
2343
2344 return len;
2345 }
2346
2347 /*
2348 * Finds the next token in *buf, dynamically allocates a buffer big
2349 * enough to hold a copy of it, and copies the token into the new
2350 * buffer. The copy is guaranteed to be terminated with '\0'. Note
2351 * that a duplicate buffer is created even for a zero-length token.
2352 *
2353 * Returns a pointer to the newly-allocated duplicate, or a null
2354 * pointer if memory for the duplicate was not available. If
2355 * the lenp argument is a non-null pointer, the length of the token
2356 * (not including the '\0') is returned in *lenp.
2357 *
2358 * If successful, the *buf pointer will be updated to point beyond
2359 * the end of the found token.
2360 *
2361 * Note: uses GFP_KERNEL for allocation.
2362 */
2363 static inline char *dup_token(const char **buf, size_t *lenp)
2364 {
2365 char *dup;
2366 size_t len;
2367
2368 len = next_token(buf);
2369 dup = kmalloc(len + 1, GFP_KERNEL);
2370 if (!dup)
2371 return NULL;
2372
2373 memcpy(dup, *buf, len);
2374 *(dup + len) = '\0';
2375 *buf += len;
2376
2377 if (lenp)
2378 *lenp = len;
2379
2380 return dup;
2381 }
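
/*
 * Example only: how the three token helpers above cooperate when
 * walking a NUL-terminated buffer.  The function name and input are
 * hypothetical; assumes the usual kernel context for kfree().
 */
static void __maybe_unused token_helpers_example(void)
{
	const char *buf = " 1.2.3.4:6789\topt1 foo";
	char opts[8];
	size_t len;
	char *dup;

	len = next_token(&buf);		/* buf -> "1.2.3.4:6789...", len = 12 */
	buf += len;			/* caller consumes the token itself */

	len = copy_token(&buf, opts, sizeof (opts));
	/* opts = "opt1", len = 4, buf advanced past the token */

	dup = dup_token(&buf, &len);	/* dup = "foo" (kmalloc'd), len = 3 */
	kfree(dup);
}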
2382
2383 /*
2384  * This fills in the pool_name, image_name, and image_name_len
2385  * fields of the given rbd_dev, based on the
2386 * list of monitor addresses and other options provided via
2387 * /sys/bus/rbd/add. Returns a pointer to a dynamically-allocated
2388 * copy of the snapshot name to map if successful, or a
2389 * pointer-coded error otherwise.
2390 *
2391 * Note: rbd_dev is assumed to have been initially zero-filled.
2392 */
2393 static char *rbd_add_parse_args(struct rbd_device *rbd_dev,
2394 const char *buf,
2395 const char **mon_addrs,
2396 size_t *mon_addrs_size,
2397 char *options,
2398 size_t options_size)
2399 {
2400 size_t len;
2401 char *err_ptr = ERR_PTR(-EINVAL);
2402 char *snap_name;
2403
2404 /* The first four tokens are required */
2405
2406 len = next_token(&buf);
2407 if (!len)
2408 return err_ptr;
2409 *mon_addrs_size = len + 1;
2410 *mon_addrs = buf;
2411
2412 buf += len;
2413
2414 len = copy_token(&buf, options, options_size);
2415 if (!len || len >= options_size)
2416 return err_ptr;
2417
2418 err_ptr = ERR_PTR(-ENOMEM);
2419 rbd_dev->pool_name = dup_token(&buf, NULL);
2420 if (!rbd_dev->pool_name)
2421 goto out_err;
2422
2423 rbd_dev->image_name = dup_token(&buf, &rbd_dev->image_name_len);
2424 if (!rbd_dev->image_name)
2425 goto out_err;
2426
2427 /* Snapshot name is optional */
2428 len = next_token(&buf);
2429 if (!len) {
2430 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
2431 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
2432 }
2433 snap_name = kmalloc(len + 1, GFP_KERNEL);
2434 if (!snap_name)
2435 goto out_err;
2436 memcpy(snap_name, buf, len);
2437 *(snap_name + len) = '\0';
2438
2439 dout(" SNAP_NAME is <%s>, len is %zd\n", snap_name, len);
2440
2441 return snap_name;
2442
2443 out_err:
2444 kfree(rbd_dev->image_name);
2445 rbd_dev->image_name = NULL;
2446 rbd_dev->image_name_len = 0;
2447 kfree(rbd_dev->pool_name);
2448 rbd_dev->pool_name = NULL;
2449
2450 return err_ptr;
2451 }
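
/*
 * Example only -- the shape of the string this parser consumes, as
 * written to /sys/bus/rbd/add.  The address, option and names below
 * are made up:
 *
 *	1.2.3.4:6789 name=admin rbd myimage mysnap
 *
 * yields mon_addrs = "1.2.3.4:6789", options = "name=admin",
 * pool_name = "rbd", image_name = "myimage", and "mysnap" as the
 * returned snapshot name.  With the last token omitted, the base
 * image is mapped and RBD_SNAP_HEAD_NAME ("-") is returned instead.
 */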
2452
2453 /*
2454 * An rbd format 2 image has a unique identifier, distinct from the
2455 * name given to it by the user. Internally, that identifier is
2456 * what's used to specify the names of objects related to the image.
2457 *
2458 * A special "rbd id" object is used to map an rbd image name to its
2459 * id. If that object doesn't exist, then there is no v2 rbd image
2460 * with the supplied name.
2461 *
2462 * This function will record the given rbd_dev's image_id field if
2463 * it can be determined, and in that case will return 0. If any
2464 * errors occur a negative errno will be returned and the rbd_dev's
2465 * image_id field will be unchanged (and should be NULL).
2466 */
2467 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
2468 {
2469 int ret;
2470 size_t size;
2471 char *object_name;
2472 void *response;
2473 void *p;
2474
2475 /*
2476 * First, see if the format 2 image id file exists, and if
2477 * so, get the image's persistent id from it.
2478 */
2479 size = sizeof (RBD_ID_PREFIX) + rbd_dev->image_name_len;
2480 object_name = kmalloc(size, GFP_NOIO);
2481 if (!object_name)
2482 return -ENOMEM;
2483 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->image_name);
2484 dout("rbd id object name is %s\n", object_name);
2485
2486 /* Response will be an encoded string, which includes a length */
2487
2488 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
2489 response = kzalloc(size, GFP_NOIO);
2490 if (!response) {
2491 ret = -ENOMEM;
2492 goto out;
2493 }
2494
2495 ret = rbd_req_sync_exec(rbd_dev, object_name,
2496 "rbd", "get_id",
2497 NULL, 0,
2498 				response, size,
2499 CEPH_OSD_FLAG_READ, NULL);
2500 dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
2501 if (ret < 0)
2502 goto out;
2503
2504 p = response;
2505 rbd_dev->image_id = ceph_extract_encoded_string(&p,
2506 			p + size,
2507 &rbd_dev->image_id_len,
2508 GFP_NOIO);
2509 if (IS_ERR(rbd_dev->image_id)) {
2510 ret = PTR_ERR(rbd_dev->image_id);
2511 rbd_dev->image_id = NULL;
2512 } else {
2513 dout("image_id is %s\n", rbd_dev->image_id);
2514 }
2515 out:
2516 kfree(response);
2517 kfree(object_name);
2518
2519 return ret;
2520 }
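
/*
 * Illustration only: the "get_id" reply decoded above is a ceph
 * length-prefixed string -- a little-endian 32-bit byte count
 * followed by that many characters, with no terminating NUL.  A
 * sketch equivalent in spirit to the ceph_extract_encoded_string()
 * call used above (hypothetical name; assumes an aligned buffer):
 */
static char * __maybe_unused extract_encoded_string_sketch(void **p, void *end,
							   size_t *lenp,
							   gfp_t gfp)
{
	u32 len;
	char *s;

	if (*p + sizeof (__le32) > end)
		return ERR_PTR(-ERANGE);	/* not even a length field */
	len = le32_to_cpu(*(__le32 *) *p);
	*p += sizeof (__le32);
	if (*p + len > end)
		return ERR_PTR(-ERANGE);	/* length exceeds the buffer */

	s = kmalloc(len + 1, gfp);		/* room for a trailing NUL */
	if (!s)
		return ERR_PTR(-ENOMEM);
	memcpy(s, *p, len);
	s[len] = '\0';
	*p += len;
	if (lenp)
		*lenp = len;

	return s;
}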
2521
2522 static ssize_t rbd_add(struct bus_type *bus,
2523 const char *buf,
2524 size_t count)
2525 {
2526 char *options;
2527 struct rbd_device *rbd_dev = NULL;
2528 const char *mon_addrs = NULL;
2529 size_t mon_addrs_size = 0;
2530 struct ceph_osd_client *osdc;
2531 int rc = -ENOMEM;
2532 char *snap_name;
2533
2534 if (!try_module_get(THIS_MODULE))
2535 return -ENODEV;
2536
2537 options = kmalloc(count, GFP_KERNEL);
2538 if (!options)
2539 goto err_out_mem;
2540 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
2541 if (!rbd_dev)
2542 goto err_out_mem;
2543
2544 /* static rbd_device initialization */
2545 spin_lock_init(&rbd_dev->lock);
2546 INIT_LIST_HEAD(&rbd_dev->node);
2547 INIT_LIST_HEAD(&rbd_dev->snaps);
2548 init_rwsem(&rbd_dev->header_rwsem);
2549
2550 /* parse add command */
2551 snap_name = rbd_add_parse_args(rbd_dev, buf,
2552 &mon_addrs, &mon_addrs_size, options, count);
2553 if (IS_ERR(snap_name)) {
2554 rc = PTR_ERR(snap_name);
2555 goto err_out_mem;
2556 }
2557
2558 rc = rbd_get_client(rbd_dev, mon_addrs, mon_addrs_size - 1, options);
2559 if (rc < 0)
2560 goto err_out_args;
2561
2562 /* pick the pool */
2563 osdc = &rbd_dev->rbd_client->client->osdc;
2564 rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name);
2565 if (rc < 0)
2566 goto err_out_client;
2567 rbd_dev->pool_id = rc;
2568
2569 rc = rbd_dev_image_id(rbd_dev);
2570 if (!rc) {
2571 rc = -ENOTSUPP; /* Not actually supporting format 2 yet */
2572 goto err_out_client;
2573 }
2574
2575 /* Version 1 images have no id; empty string is used */
2576
2577 rbd_dev->image_id = kstrdup("", GFP_KERNEL);
2578 if (!rbd_dev->image_id) {
2579 rc = -ENOMEM;
2580 goto err_out_client;
2581 }
2582 rbd_dev->image_id_len = 0;
2583
2584 /* Create the name of the header object */
2585
2586 rbd_dev->header_name = kmalloc(rbd_dev->image_name_len
2587 + sizeof (RBD_SUFFIX),
2588 GFP_KERNEL);
2589 if (!rbd_dev->header_name)
2590 goto err_out_client;
2591 sprintf(rbd_dev->header_name, "%s%s", rbd_dev->image_name, RBD_SUFFIX);
2592
2593 /* Get information about the image being mapped */
2594
2595 rc = rbd_read_header(rbd_dev, &rbd_dev->header);
2596 if (rc)
2597 goto err_out_client;
2598
2599 /* no need to lock here, as rbd_dev is not registered yet */
2600 rc = rbd_dev_snaps_update(rbd_dev);
2601 if (rc)
2602 goto err_out_header;
2603
2604 rc = rbd_dev_set_mapping(rbd_dev, snap_name);
2605 if (rc)
2606 goto err_out_header;
2607
2608 /* generate unique id: find highest unique id, add one */
2609 rbd_dev_id_get(rbd_dev);
2610
2611 /* Fill in the device name, now that we have its id. */
2612 BUILD_BUG_ON(DEV_NAME_LEN
2613 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
2614 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
2615
2616 /* Get our block major device number. */
2617
2618 rc = register_blkdev(0, rbd_dev->name);
2619 if (rc < 0)
2620 goto err_out_id;
2621 rbd_dev->major = rc;
2622
2623 /* Set up the blkdev mapping. */
2624
2625 rc = rbd_init_disk(rbd_dev);
2626 if (rc)
2627 goto err_out_blkdev;
2628
2629 rc = rbd_bus_add_dev(rbd_dev);
2630 if (rc)
2631 goto err_out_disk;
2632
2633 /*
2634 * At this point cleanup in the event of an error is the job
2635 * of the sysfs code (initiated by rbd_bus_del_dev()).
2636 */
2637
2638 down_write(&rbd_dev->header_rwsem);
2639 rc = rbd_dev_snaps_register(rbd_dev);
2640 up_write(&rbd_dev->header_rwsem);
2641 if (rc)
2642 goto err_out_bus;
2643
2644 rc = rbd_init_watch_dev(rbd_dev);
2645 if (rc)
2646 goto err_out_bus;
2647
2648 /* Everything's ready. Announce the disk to the world. */
2649
2650 add_disk(rbd_dev->disk);
2651
2652 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
2653 (unsigned long long) rbd_dev->mapping.size);
2654 
	kfree(options);	/* fully parsed above; don't leak it on success */
2655 	return count;
2656
2657 err_out_bus:
2658 /* this will also clean up rest of rbd_dev stuff */
2659
2660 rbd_bus_del_dev(rbd_dev);
2661 kfree(options);
2662 return rc;
2663
2664 err_out_disk:
2665 rbd_free_disk(rbd_dev);
2666 err_out_blkdev:
2667 unregister_blkdev(rbd_dev->major, rbd_dev->name);
2668 err_out_id:
2669 rbd_dev_id_put(rbd_dev);
2670 err_out_header:
2671 rbd_header_free(&rbd_dev->header);
2672 err_out_client:
2673 kfree(rbd_dev->header_name);
2674 rbd_put_client(rbd_dev);
2675 kfree(rbd_dev->image_id);
2676 err_out_args:
2677 kfree(rbd_dev->mapping.snap_name);
2678 kfree(rbd_dev->image_name);
2679 kfree(rbd_dev->pool_name);
2680 err_out_mem:
2681 kfree(rbd_dev);
2682 kfree(options);
2683
2684 dout("Error adding device %s\n", buf);
2685 module_put(THIS_MODULE);
2686
2687 return (ssize_t) rc;
2688 }
2689
2690 static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
2691 {
2692 struct list_head *tmp;
2693 struct rbd_device *rbd_dev;
2694
2695 spin_lock(&rbd_dev_list_lock);
2696 list_for_each(tmp, &rbd_dev_list) {
2697 rbd_dev = list_entry(tmp, struct rbd_device, node);
2698 if (rbd_dev->dev_id == dev_id) {
2699 spin_unlock(&rbd_dev_list_lock);
2700 return rbd_dev;
2701 }
2702 }
2703 spin_unlock(&rbd_dev_list_lock);
2704 return NULL;
2705 }
2706
2707 static void rbd_dev_release(struct device *dev)
2708 {
2709 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2710
2711 if (rbd_dev->watch_request) {
2712 struct ceph_client *client = rbd_dev->rbd_client->client;
2713
2714 ceph_osdc_unregister_linger_request(&client->osdc,
2715 rbd_dev->watch_request);
2716 }
2717 if (rbd_dev->watch_event)
2718 rbd_req_sync_unwatch(rbd_dev);
2719
2720 rbd_put_client(rbd_dev);
2721
2722 /* clean up and free blkdev */
2723 rbd_free_disk(rbd_dev);
2724 unregister_blkdev(rbd_dev->major, rbd_dev->name);
2725
2726 /* release allocated disk header fields */
2727 rbd_header_free(&rbd_dev->header);
2728
2729 /* done with the id, and with the rbd_dev */
2730 kfree(rbd_dev->mapping.snap_name);
2731 kfree(rbd_dev->image_id);
2732 kfree(rbd_dev->header_name);
2733 kfree(rbd_dev->pool_name);
2734 kfree(rbd_dev->image_name);
2735 rbd_dev_id_put(rbd_dev);
2736 kfree(rbd_dev);
2737
2738 /* release module ref */
2739 module_put(THIS_MODULE);
2740 }
2741
2742 static ssize_t rbd_remove(struct bus_type *bus,
2743 const char *buf,
2744 size_t count)
2745 {
2746 struct rbd_device *rbd_dev = NULL;
2747 int target_id, rc;
2748 unsigned long ul;
2749 int ret = count;
2750
2751 rc = strict_strtoul(buf, 10, &ul);
2752 if (rc)
2753 return rc;
2754
2755 /* convert to int; abort if we lost anything in the conversion */
2756 target_id = (int) ul;
2757 if (target_id != ul)
2758 return -EINVAL;
2759
2760 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2761
2762 rbd_dev = __rbd_get_dev(target_id);
2763 if (!rbd_dev) {
2764 ret = -ENOENT;
2765 goto done;
2766 }
2767
2768 __rbd_remove_all_snaps(rbd_dev);
2769 rbd_bus_del_dev(rbd_dev);
2770
2771 done:
2772 mutex_unlock(&ctl_mutex);
2773
2774 return ret;
2775 }
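
/*
 * Example only: from userspace, tearing down a mapping is the mirror
 * of rbd_add() -- write the device id (the N in /dev/rbdN) here:
 *
 *	echo 1 > /sys/bus/rbd/remove
 */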
2776
2777 /*
2778 * create control files in sysfs
2779 * /sys/bus/rbd/...
2780 */
2781 static int rbd_sysfs_init(void)
2782 {
2783 int ret;
2784
2785 ret = device_register(&rbd_root_dev);
2786 if (ret < 0)
2787 return ret;
2788
2789 ret = bus_register(&rbd_bus_type);
2790 if (ret < 0)
2791 device_unregister(&rbd_root_dev);
2792
2793 return ret;
2794 }
2795
2796 static void rbd_sysfs_cleanup(void)
2797 {
2798 bus_unregister(&rbd_bus_type);
2799 device_unregister(&rbd_root_dev);
2800 }
2801
2802 int __init rbd_init(void)
2803 {
2804 int rc;
2805
2806 rc = rbd_sysfs_init();
2807 if (rc)
2808 return rc;
2809 pr_info("loaded " RBD_DRV_NAME_LONG "\n");
2810 return 0;
2811 }
2812
2813 void __exit rbd_exit(void)
2814 {
2815 rbd_sysfs_cleanup();
2816 }
2817
2818 module_init(rbd_init);
2819 module_exit(rbd_exit);
2820
2821 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
2822 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
2823 MODULE_DESCRIPTION("rados block device");
2824
2825 /* following authorship retained from original osdblk.c */
2826 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
2827
2828 MODULE_LICENSE("GPL");