rbd: fix error paths in rbd_img_request_fill()
drivers/block/rbd.c
1
2 /*
3 rbd.c -- Export ceph rados objects as a Linux block device
4
5
6 based on drivers/block/osdblk.c:
7
8 Copyright 2009 Red Hat, Inc.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING. If not, write to
21 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
22
23
24
25 For usage instructions, please refer to:
26
27 Documentation/ABI/testing/sysfs-bus-rbd
28
29 */
30
31 #include <linux/ceph/libceph.h>
32 #include <linux/ceph/osd_client.h>
33 #include <linux/ceph/mon_client.h>
34 #include <linux/ceph/decode.h>
35 #include <linux/parser.h>
36 #include <linux/bsearch.h>
37
38 #include <linux/kernel.h>
39 #include <linux/device.h>
40 #include <linux/module.h>
41 #include <linux/fs.h>
42 #include <linux/blkdev.h>
43 #include <linux/slab.h>
44
45 #include "rbd_types.h"
46
47 #define RBD_DEBUG /* Activate rbd_assert() calls */
48
49 /*
50 * The basic unit of block I/O is a sector. It is interpreted in a
51 * number of contexts in Linux (blk, bio, genhd), but the default is
52 * universally 512 bytes. These symbols are just slightly more
53 * meaningful than the bare numbers they represent.
54 */
55 #define SECTOR_SHIFT 9
56 #define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
57
58 /*
59 * Increment the given counter and return its updated value.
60 * If the counter is already 0 it will not be incremented.
61 * If the counter is already at its maximum value, -EINVAL is
62 * returned without updating it.
63 */
64 static int atomic_inc_return_safe(atomic_t *v)
65 {
66 unsigned int counter;
67
68 counter = (unsigned int)__atomic_add_unless(v, 1, 0);
69 if (counter <= (unsigned int)INT_MAX)
70 return (int)counter;
71
72 atomic_dec(v);
73
74 return -EINVAL;
75 }
76
77 /* Decrement the counter. Return the resulting value, or -EINVAL */
78 static int atomic_dec_return_safe(atomic_t *v)
79 {
80 int counter;
81
82 counter = atomic_dec_return(v);
83 if (counter >= 0)
84 return counter;
85
86 atomic_inc(v);
87
88 return -EINVAL;
89 }
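/*
 * Usage sketch (illustration only, not part of the driver): these "safe"
 * variants exist so a reference count can be treated as dead once it hits
 * zero.  A caller pattern like the parent_ref handling later in this file
 * looks roughly like:
 *
 *	if (atomic_inc_return_safe(&ref) > 0) {
 *		... use the referenced object ...
 *		if (!atomic_dec_return_safe(&ref))
 *			... last reference gone, tear it down ...
 *	}
 *
 * A counter that has already dropped to zero is never resurrected, and
 * overflow/underflow produce -EINVAL instead of silently wrapping.
 */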
90
91 #define RBD_DRV_NAME "rbd"
92 #define RBD_DRV_NAME_LONG "rbd (rados block device)"
93
94 #define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
95
96 #define RBD_SNAP_DEV_NAME_PREFIX "snap_"
97 #define RBD_MAX_SNAP_NAME_LEN \
98 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
99
100 #define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
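/*
 * Rough illustration of the 510 limit: the snapshot ids alone take
 * 510 * sizeof(u64) = 4080 bytes, so a maximal snapshot context plus its
 * handful of bookkeeping fields stays within (roughly) one 4 KiB page.
 */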
101
102 #define RBD_SNAP_HEAD_NAME "-"
103
104 #define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */
105
106 /* This allows a single page to hold an image name sent by the OSD */
107 #define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
108 #define RBD_IMAGE_ID_LEN_MAX 64
109
110 #define RBD_OBJ_PREFIX_LEN_MAX 64
111
112 /* Feature bits */
113
114 #define RBD_FEATURE_LAYERING (1<<0)
115 #define RBD_FEATURE_STRIPINGV2 (1<<1)
116 #define RBD_FEATURES_ALL \
117 (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)
118
119 /* Features supported by this (client software) implementation. */
120
121 #define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL)
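/*
 * Illustration (not part of the driver): an image that uses both layering
 * and striping v2 advertises features (1 << 0) | (1 << 1) == 0x3, which is
 * exactly RBD_FEATURES_ALL.  RBD_FEATURES_SUPPORTED is the set this client
 * knows how to handle; elsewhere in the driver a mapping is refused if the
 * image advertises feature bits outside that set.
 */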
122
123 /*
124 * An RBD device name will be "rbd#", where the "rbd" comes from
125 * RBD_DRV_NAME above, and # is a unique integer identifier.
126 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
127 * enough to hold all possible device names.
128 */
129 #define DEV_NAME_LEN 32
130 #define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1)
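/*
 * Worked example (illustration only): with a 4-byte int the formula gives
 * (5 * 4) / 2 + 1 == 11 characters, i.e. the 10 decimal digits of INT_MAX
 * plus room for a sign (5/2 over-approximates the ~2.4 decimal digits each
 * byte can contribute).  DEV_NAME_LEN (32) therefore comfortably holds
 * "rbd" followed by any device id.
 */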
131
132 /*
133 * block device image metadata (in-memory version)
134 */
135 struct rbd_image_header {
136 /* These six fields never change for a given rbd image */
137 char *object_prefix;
138 __u8 obj_order;
139 __u8 crypt_type;
140 __u8 comp_type;
141 u64 stripe_unit;
142 u64 stripe_count;
143 u64 features; /* Might be changeable someday? */
144
145 /* The remaining fields need to be updated occasionally */
146 u64 image_size;
147 struct ceph_snap_context *snapc;
148 char *snap_names; /* format 1 only */
149 u64 *snap_sizes; /* format 1 only */
150 };
151
152 /*
153 * An rbd image specification.
154 *
155 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
156 * identify an image. Each rbd_dev structure includes a pointer to
157 * an rbd_spec structure that encapsulates this identity.
158 *
159 * Each of the ids in an rbd_spec has an associated name. For a
160 * user-mapped image, the names are supplied and the ids associated
161 * with them are looked up. For a layered image, a parent image is
162 * defined by the tuple, and the names are looked up.
163 *
164 * An rbd_dev structure contains a parent_spec pointer which is
165 * non-null if the image it represents is a child in a layered
166 * image. This pointer will refer to the rbd_spec structure used
167 * by the parent rbd_dev for its own identity (i.e., the structure
168 * is shared between the parent and child).
169 *
170 * Since these structures are populated once, during the discovery
171 * phase of image construction, they are effectively immutable so
172 * we make no effort to synchronize access to them.
173 *
174 * Note that code herein does not assume the image name is known (it
175 * could be a null pointer).
176 */
177 struct rbd_spec {
178 u64 pool_id;
179 const char *pool_name;
180
181 const char *image_id;
182 const char *image_name;
183
184 u64 snap_id;
185 const char *snap_name;
186
187 struct kref kref;
188 };
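/*
 * Illustration (not part of the driver): mapping image "foo" at its current
 * head in pool "rbd" might resolve to a spec like the one below.  The
 * numeric pool id and the image id string are made-up examples; the names
 * are what a user would supply or what lookup fills in.
 *
 *	pool_id  = 2,           pool_name  = "rbd"
 *	image_id = "1028b4567", image_name = "foo"
 *	snap_id  = CEPH_NOSNAP, snap_name  = "-"   (RBD_SNAP_HEAD_NAME)
 */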
189
190 /*
191 * An instance of the client. Multiple devices may share an rbd client.
192 */
193 struct rbd_client {
194 struct ceph_client *client;
195 struct kref kref;
196 struct list_head node;
197 };
198
199 struct rbd_img_request;
200 typedef void (*rbd_img_callback_t)(struct rbd_img_request *);
201
202 #define BAD_WHICH U32_MAX /* Good which or bad which, which? */
203
204 struct rbd_obj_request;
205 typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
206
207 enum obj_request_type {
208 OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
209 };
210
211 enum obj_req_flags {
212 OBJ_REQ_DONE, /* completion flag: not done = 0, done = 1 */
213 OBJ_REQ_IMG_DATA, /* object usage: standalone = 0, image = 1 */
214 OBJ_REQ_KNOWN, /* EXISTS flag valid: no = 0, yes = 1 */
215 OBJ_REQ_EXISTS, /* target exists: no = 0, yes = 1 */
216 };
217
218 struct rbd_obj_request {
219 const char *object_name;
220 u64 offset; /* object start byte */
221 u64 length; /* bytes from offset */
222 unsigned long flags;
223
224 /*
225 * An object request associated with an image will have its
226 * img_data flag set; a standalone object request will not.
227 *
228 * A standalone object request will have which == BAD_WHICH
229 * and a null obj_request pointer.
230 *
231 * An object request initiated in support of a layered image
232 * object (to check for its existence before a write) will
233 * have which == BAD_WHICH and a non-null obj_request pointer.
234 *
235 * Finally, an object request for rbd image data will have
236 * which != BAD_WHICH, and will have a non-null img_request
237 * pointer. The value of which will be in the range
238 * 0..(img_request->obj_request_count-1).
239 */
240 union {
241 struct rbd_obj_request *obj_request; /* STAT op */
242 struct {
243 struct rbd_img_request *img_request;
244 u64 img_offset;
245 /* links for img_request->obj_requests list */
246 struct list_head links;
247 };
248 };
249 u32 which; /* position in image request list */
250
251 enum obj_request_type type;
252 union {
253 struct bio *bio_list;
254 struct {
255 struct page **pages;
256 u32 page_count;
257 };
258 };
259 struct page **copyup_pages;
260 u32 copyup_page_count;
261
262 struct ceph_osd_request *osd_req;
263
264 u64 xferred; /* bytes transferred */
265 int result;
266
267 rbd_obj_callback_t callback;
268 struct completion completion;
269
270 struct kref kref;
271 };
272
273 enum img_req_flags {
274 IMG_REQ_WRITE, /* I/O direction: read = 0, write = 1 */
275 IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */
276 IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */
277 };
278
279 struct rbd_img_request {
280 struct rbd_device *rbd_dev;
281 u64 offset; /* starting image byte offset */
282 u64 length; /* byte count from offset */
283 unsigned long flags;
284 union {
285 u64 snap_id; /* for reads */
286 struct ceph_snap_context *snapc; /* for writes */
287 };
288 union {
289 struct request *rq; /* block request */
290 struct rbd_obj_request *obj_request; /* obj req initiator */
291 };
292 struct page **copyup_pages;
293 u32 copyup_page_count;
294 spinlock_t completion_lock;/* protects next_completion */
295 u32 next_completion;
296 rbd_img_callback_t callback;
297 u64 xferred;/* aggregate bytes transferred */
298 int result; /* first nonzero obj_request result */
299
300 u32 obj_request_count;
301 struct list_head obj_requests; /* rbd_obj_request structs */
302
303 struct kref kref;
304 };
305
306 #define for_each_obj_request(ireq, oreq) \
307 list_for_each_entry(oreq, &(ireq)->obj_requests, links)
308 #define for_each_obj_request_from(ireq, oreq) \
309 list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
310 #define for_each_obj_request_safe(ireq, oreq, n) \
311 list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
312
313 struct rbd_mapping {
314 u64 size;
315 u64 features;
316 bool read_only;
317 };
318
319 /*
320 * a single device
321 */
322 struct rbd_device {
323 int dev_id; /* blkdev unique id */
324
325 int major; /* blkdev assigned major */
326 struct gendisk *disk; /* blkdev's gendisk and rq */
327
328 u32 image_format; /* Either 1 or 2 */
329 struct rbd_client *rbd_client;
330
331 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
332
333 spinlock_t lock; /* queue, flags, open_count */
334
335 struct rbd_image_header header;
336 unsigned long flags; /* possibly lock protected */
337 struct rbd_spec *spec;
338
339 char *header_name;
340
341 struct ceph_file_layout layout;
342
343 struct ceph_osd_event *watch_event;
344 struct rbd_obj_request *watch_request;
345
346 struct rbd_spec *parent_spec;
347 u64 parent_overlap;
348 atomic_t parent_ref;
349 struct rbd_device *parent;
350
351 /* protects updating the header */
352 struct rw_semaphore header_rwsem;
353
354 struct rbd_mapping mapping;
355
356 struct list_head node;
357
358 /* sysfs related */
359 struct device dev;
360 unsigned long open_count; /* protected by lock */
361 };
362
363 /*
364 * Flag bits for rbd_dev->flags. If atomicity is required,
365 * rbd_dev->lock is used to protect access.
366 *
367 * Currently, only the "removing" flag (which is coupled with the
368 * "open_count" field) requires atomic access.
369 */
370 enum rbd_dev_flags {
371 RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
372 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
373 };
374
375 static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
376
377 static LIST_HEAD(rbd_dev_list); /* devices */
378 static DEFINE_SPINLOCK(rbd_dev_list_lock);
379
380 static LIST_HEAD(rbd_client_list); /* clients */
381 static DEFINE_SPINLOCK(rbd_client_list_lock);
382
383 /* Slab caches for frequently-allocated structures */
384
385 static struct kmem_cache *rbd_img_request_cache;
386 static struct kmem_cache *rbd_obj_request_cache;
387 static struct kmem_cache *rbd_segment_name_cache;
388
389 static int rbd_img_request_submit(struct rbd_img_request *img_request);
390
391 static void rbd_dev_device_release(struct device *dev);
392
393 static ssize_t rbd_add(struct bus_type *bus, const char *buf,
394 size_t count);
395 static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
396 size_t count);
397 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
398 static void rbd_spec_put(struct rbd_spec *spec);
399
400 static struct bus_attribute rbd_bus_attrs[] = {
401 __ATTR(add, S_IWUSR, NULL, rbd_add),
402 __ATTR(remove, S_IWUSR, NULL, rbd_remove),
403 __ATTR_NULL
404 };
405
406 static struct bus_type rbd_bus_type = {
407 .name = "rbd",
408 .bus_attrs = rbd_bus_attrs,
409 };
410
411 static void rbd_root_dev_release(struct device *dev)
412 {
413 }
414
415 static struct device rbd_root_dev = {
416 .init_name = "rbd",
417 .release = rbd_root_dev_release,
418 };
419
420 static __printf(2, 3)
421 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
422 {
423 struct va_format vaf;
424 va_list args;
425
426 va_start(args, fmt);
427 vaf.fmt = fmt;
428 vaf.va = &args;
429
430 if (!rbd_dev)
431 printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
432 else if (rbd_dev->disk)
433 printk(KERN_WARNING "%s: %s: %pV\n",
434 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
435 else if (rbd_dev->spec && rbd_dev->spec->image_name)
436 printk(KERN_WARNING "%s: image %s: %pV\n",
437 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
438 else if (rbd_dev->spec && rbd_dev->spec->image_id)
439 printk(KERN_WARNING "%s: id %s: %pV\n",
440 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
441 else /* punt */
442 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
443 RBD_DRV_NAME, rbd_dev, &vaf);
444 va_end(args);
445 }
446
447 #ifdef RBD_DEBUG
448 #define rbd_assert(expr) \
449 if (unlikely(!(expr))) { \
450 printk(KERN_ERR "\nAssertion failure in %s() " \
451 "at line %d:\n\n" \
452 "\trbd_assert(%s);\n\n", \
453 __func__, __LINE__, #expr); \
454 BUG(); \
455 }
456 #else /* !RBD_DEBUG */
457 # define rbd_assert(expr) ((void) 0)
458 #endif /* !RBD_DEBUG */
459
460 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
461 static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
462 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
463
464 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
465 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
466 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
467 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
468 u64 snap_id);
469 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
470 u8 *order, u64 *snap_size);
471 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
472 u64 *snap_features);
473 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
474
475 static int rbd_open(struct block_device *bdev, fmode_t mode)
476 {
477 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
478 bool removing = false;
479
480 if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
481 return -EROFS;
482
483 spin_lock_irq(&rbd_dev->lock);
484 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
485 removing = true;
486 else
487 rbd_dev->open_count++;
488 spin_unlock_irq(&rbd_dev->lock);
489 if (removing)
490 return -ENOENT;
491
492 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
493 (void) get_device(&rbd_dev->dev);
494 set_device_ro(bdev, rbd_dev->mapping.read_only);
495 mutex_unlock(&ctl_mutex);
496
497 return 0;
498 }
499
500 static void rbd_release(struct gendisk *disk, fmode_t mode)
501 {
502 struct rbd_device *rbd_dev = disk->private_data;
503 unsigned long open_count_before;
504
505 spin_lock_irq(&rbd_dev->lock);
506 open_count_before = rbd_dev->open_count--;
507 spin_unlock_irq(&rbd_dev->lock);
508 rbd_assert(open_count_before > 0);
509
510 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
511 put_device(&rbd_dev->dev);
512 mutex_unlock(&ctl_mutex);
513 }
514
515 static const struct block_device_operations rbd_bd_ops = {
516 .owner = THIS_MODULE,
517 .open = rbd_open,
518 .release = rbd_release,
519 };
520
521 /*
522 * Initialize an rbd client instance. Success or not, this function
523 * consumes ceph_opts.
524 */
525 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
526 {
527 struct rbd_client *rbdc;
528 int ret = -ENOMEM;
529
530 dout("%s:\n", __func__);
531 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
532 if (!rbdc)
533 goto out_opt;
534
535 kref_init(&rbdc->kref);
536 INIT_LIST_HEAD(&rbdc->node);
537
538 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
539
540 rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
541 if (IS_ERR(rbdc->client))
542 goto out_mutex;
543 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
544
545 ret = ceph_open_session(rbdc->client);
546 if (ret < 0)
547 goto out_err;
548
549 spin_lock(&rbd_client_list_lock);
550 list_add_tail(&rbdc->node, &rbd_client_list);
551 spin_unlock(&rbd_client_list_lock);
552
553 mutex_unlock(&ctl_mutex);
554 dout("%s: rbdc %p\n", __func__, rbdc);
555
556 return rbdc;
557
558 out_err:
559 ceph_destroy_client(rbdc->client);
560 out_mutex:
561 mutex_unlock(&ctl_mutex);
562 kfree(rbdc);
563 out_opt:
564 if (ceph_opts)
565 ceph_destroy_options(ceph_opts);
566 dout("%s: error %d\n", __func__, ret);
567
568 return ERR_PTR(ret);
569 }
570
571 static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
572 {
573 kref_get(&rbdc->kref);
574
575 return rbdc;
576 }
577
578 /*
579 * Find a ceph client with specific addr and configuration. If
580 * found, bump its reference count.
581 */
582 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
583 {
584 struct rbd_client *client_node;
585 bool found = false;
586
587 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
588 return NULL;
589
590 spin_lock(&rbd_client_list_lock);
591 list_for_each_entry(client_node, &rbd_client_list, node) {
592 if (!ceph_compare_options(ceph_opts, client_node->client)) {
593 __rbd_get_client(client_node);
594
595 found = true;
596 break;
597 }
598 }
599 spin_unlock(&rbd_client_list_lock);
600
601 return found ? client_node : NULL;
602 }
603
604 /*
605 * mount options
606 */
607 enum {
608 Opt_last_int,
609 /* int args above */
610 Opt_last_string,
611 /* string args above */
612 Opt_read_only,
613 Opt_read_write,
614 /* Boolean args above */
615 Opt_last_bool,
616 };
617
618 static match_table_t rbd_opts_tokens = {
619 /* int args above */
620 /* string args above */
621 {Opt_read_only, "read_only"},
622 {Opt_read_only, "ro"}, /* Alternate spelling */
623 {Opt_read_write, "read_write"},
624 {Opt_read_write, "rw"}, /* Alternate spelling */
625 /* Boolean args above */
626 {-1, NULL}
627 };
628
629 struct rbd_options {
630 bool read_only;
631 };
632
633 #define RBD_READ_ONLY_DEFAULT false
634
635 static int parse_rbd_opts_token(char *c, void *private)
636 {
637 struct rbd_options *rbd_opts = private;
638 substring_t argstr[MAX_OPT_ARGS];
639 int token, intval, ret;
640
641 token = match_token(c, rbd_opts_tokens, argstr);
642 if (token < 0)
643 return -EINVAL;
644
645 if (token < Opt_last_int) {
646 ret = match_int(&argstr[0], &intval);
647 if (ret < 0) {
648 pr_err("bad mount option arg (not int) "
649 "at '%s'\n", c);
650 return ret;
651 }
652 dout("got int token %d val %d\n", token, intval);
653 } else if (token > Opt_last_int && token < Opt_last_string) {
654 dout("got string token %d val %s\n", token,
655 argstr[0].from);
656 } else if (token > Opt_last_string && token < Opt_last_bool) {
657 dout("got Boolean token %d\n", token);
658 } else {
659 dout("got token %d\n", token);
660 }
661
662 switch (token) {
663 case Opt_read_only:
664 rbd_opts->read_only = true;
665 break;
666 case Opt_read_write:
667 rbd_opts->read_only = false;
668 break;
669 default:
670 rbd_assert(false);
671 break;
672 }
673 return 0;
674 }
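/*
 * Illustrative sketch (not part of the driver): the option string supplied
 * at map time is split into comma-separated tokens, and each token that the
 * ceph option parser does not recognize is handed to parse_rbd_opts_token().
 * Roughly:
 *
 *	struct rbd_options opts = { .read_only = RBD_READ_ONLY_DEFAULT };
 *
 *	parse_rbd_opts_token("ro", &opts);     // opts.read_only == true
 *	parse_rbd_opts_token("rw", &opts);     // opts.read_only == false
 *	parse_rbd_opts_token("bogus", &opts);  // returns -EINVAL
 */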
675
676 /*
677 * Get a ceph client with a specific addr and configuration; if one does
678 * not exist, create it. Either way, ceph_opts is consumed by this
679 * function.
680 */
681 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
682 {
683 struct rbd_client *rbdc;
684
685 rbdc = rbd_client_find(ceph_opts);
686 if (rbdc) /* using an existing client */
687 ceph_destroy_options(ceph_opts);
688 else
689 rbdc = rbd_client_create(ceph_opts);
690
691 return rbdc;
692 }
693
694 /*
695 * Destroy ceph client
696 *
697 * rbd_client_list_lock is taken here while unlinking the client, so the caller must not hold it.
698 */
699 static void rbd_client_release(struct kref *kref)
700 {
701 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
702
703 dout("%s: rbdc %p\n", __func__, rbdc);
704 spin_lock(&rbd_client_list_lock);
705 list_del(&rbdc->node);
706 spin_unlock(&rbd_client_list_lock);
707
708 ceph_destroy_client(rbdc->client);
709 kfree(rbdc);
710 }
711
712 /*
713 * Drop reference to ceph client node. If it's not referenced anymore, release
714 * it.
715 */
716 static void rbd_put_client(struct rbd_client *rbdc)
717 {
718 if (rbdc)
719 kref_put(&rbdc->kref, rbd_client_release);
720 }
721
722 static bool rbd_image_format_valid(u32 image_format)
723 {
724 return image_format == 1 || image_format == 2;
725 }
726
727 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
728 {
729 size_t size;
730 u32 snap_count;
731
732 /* The header has to start with the magic rbd header text */
733 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
734 return false;
735
736 /* The bio layer requires at least sector-sized I/O */
737
738 if (ondisk->options.order < SECTOR_SHIFT)
739 return false;
740
741 /* If we use u64 in a few spots we may be able to loosen this */
742
743 if (ondisk->options.order > 8 * sizeof (int) - 1)
744 return false;
745
746 /*
747 * The size of a snapshot header has to fit in a size_t, and
748 * that limits the number of snapshots.
749 */
750 snap_count = le32_to_cpu(ondisk->snap_count);
751 size = SIZE_MAX - sizeof (struct ceph_snap_context);
752 if (snap_count > size / sizeof (__le64))
753 return false;
754
755 /*
756 * Not only that, but the size of the entire snapshot
757 * header must also be representable in a size_t.
758 */
759 size -= snap_count * sizeof (__le64);
760 if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
761 return false;
762
763 return true;
764 }
765
766 /*
767 * Fill an rbd image header with information from the given format 1
768 * on-disk header.
769 */
770 static int rbd_header_from_disk(struct rbd_device *rbd_dev,
771 struct rbd_image_header_ondisk *ondisk)
772 {
773 struct rbd_image_header *header = &rbd_dev->header;
774 bool first_time = header->object_prefix == NULL;
775 struct ceph_snap_context *snapc;
776 char *object_prefix = NULL;
777 char *snap_names = NULL;
778 u64 *snap_sizes = NULL;
779 u32 snap_count;
780 size_t size;
781 int ret = -ENOMEM;
782 u32 i;
783
784 /* Allocate this now to avoid having to handle failure below */
785
786 if (first_time) {
787 size_t len;
788
789 len = strnlen(ondisk->object_prefix,
790 sizeof (ondisk->object_prefix));
791 object_prefix = kmalloc(len + 1, GFP_KERNEL);
792 if (!object_prefix)
793 return -ENOMEM;
794 memcpy(object_prefix, ondisk->object_prefix, len);
795 object_prefix[len] = '\0';
796 }
797
798 /* Allocate the snapshot context and fill it in */
799
800 snap_count = le32_to_cpu(ondisk->snap_count);
801 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
802 if (!snapc)
803 goto out_err;
804 snapc->seq = le64_to_cpu(ondisk->snap_seq);
805 if (snap_count) {
806 struct rbd_image_snap_ondisk *snaps;
807 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
808
809 /* We'll keep a copy of the snapshot names... */
810
811 if (snap_names_len > (u64)SIZE_MAX)
812 goto out_2big;
813 snap_names = kmalloc(snap_names_len, GFP_KERNEL);
814 if (!snap_names)
815 goto out_err;
816
817 /* ...as well as the array of their sizes. */
818
819 size = snap_count * sizeof (*header->snap_sizes);
820 snap_sizes = kmalloc(size, GFP_KERNEL);
821 if (!snap_sizes)
822 goto out_err;
823
824 /*
825 * Copy the names, and fill in each snapshot's id
826 * and size.
827 *
828 * Note that rbd_dev_v1_header_info() guarantees the
829 * ondisk buffer we're working with has
830 * snap_names_len bytes beyond the end of the
831 * snapshot id array, so this memcpy() is safe.
832 */
833 memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
834 snaps = ondisk->snaps;
835 for (i = 0; i < snap_count; i++) {
836 snapc->snaps[i] = le64_to_cpu(snaps[i].id);
837 snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
838 }
839 }
840
841 /* We won't fail any more, so fill in the header */
842
843 down_write(&rbd_dev->header_rwsem);
844 if (first_time) {
845 header->object_prefix = object_prefix;
846 header->obj_order = ondisk->options.order;
847 header->crypt_type = ondisk->options.crypt_type;
848 header->comp_type = ondisk->options.comp_type;
849 /* The rest aren't used for format 1 images */
850 header->stripe_unit = 0;
851 header->stripe_count = 0;
852 header->features = 0;
853 } else {
854 ceph_put_snap_context(header->snapc);
855 kfree(header->snap_names);
856 kfree(header->snap_sizes);
857 }
858
859 /* The remaining fields always get updated (when we refresh) */
860
861 header->image_size = le64_to_cpu(ondisk->image_size);
862 header->snapc = snapc;
863 header->snap_names = snap_names;
864 header->snap_sizes = snap_sizes;
865
866 /* Make sure mapping size is consistent with header info */
867
868 if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
869 if (rbd_dev->mapping.size != header->image_size)
870 rbd_dev->mapping.size = header->image_size;
871
872 up_write(&rbd_dev->header_rwsem);
873
874 return 0;
875 out_2big:
876 ret = -EIO;
877 out_err:
878 kfree(snap_sizes);
879 kfree(snap_names);
880 ceph_put_snap_context(snapc);
881 kfree(object_prefix);
882
883 return ret;
884 }
885
886 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
887 {
888 const char *snap_name;
889
890 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
891
892 /* Skip over names until we find the one we are looking for */
893
894 snap_name = rbd_dev->header.snap_names;
895 while (which--)
896 snap_name += strlen(snap_name) + 1;
897
898 return kstrdup(snap_name, GFP_KERNEL);
899 }
900
901 /*
902 * Snapshot id comparison function for use with qsort()/bsearch().
903 * Note that result is for snapshots in *descending* order.
904 */
905 static int snapid_compare_reverse(const void *s1, const void *s2)
906 {
907 u64 snap_id1 = *(u64 *)s1;
908 u64 snap_id2 = *(u64 *)s2;
909
910 if (snap_id1 < snap_id2)
911 return 1;
912 return snap_id1 == snap_id2 ? 0 : -1;
913 }
914
915 /*
916 * Search a snapshot context to see if the given snapshot id is
917 * present.
918 *
919 * Returns the position of the snapshot id in the array if it's found,
920 * or BAD_SNAP_INDEX otherwise.
921 *
922 * Note: The snapshot array is kept sorted (by the osd) in
923 * reverse order, highest snapshot id first.
924 */
925 static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
926 {
927 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
928 u64 *found;
929
930 found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
931 sizeof (snap_id), snapid_compare_reverse);
932
933 return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
934 }
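/*
 * Worked example (illustration only): with a snapshot context whose ids are
 * kept in descending order, say snaps[] = { 12, 7, 3 }, the lookup behaves
 * as follows:
 *
 *	rbd_dev_snap_index(rbd_dev, 12) == 0
 *	rbd_dev_snap_index(rbd_dev,  7) == 1
 *	rbd_dev_snap_index(rbd_dev,  5) == BAD_SNAP_INDEX
 *
 * snapid_compare_reverse() deliberately returns -1/0/1 "the wrong way
 * around" so that bsearch() works on this descending array.
 */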
935
936 static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
937 u64 snap_id)
938 {
939 u32 which;
940 const char *snap_name;
941
942 which = rbd_dev_snap_index(rbd_dev, snap_id);
943 if (which == BAD_SNAP_INDEX)
944 return ERR_PTR(-ENOENT);
945
946 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
947 return snap_name ? snap_name : ERR_PTR(-ENOMEM);
948 }
949
950 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
951 {
952 if (snap_id == CEPH_NOSNAP)
953 return RBD_SNAP_HEAD_NAME;
954
955 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
956 if (rbd_dev->image_format == 1)
957 return rbd_dev_v1_snap_name(rbd_dev, snap_id);
958
959 return rbd_dev_v2_snap_name(rbd_dev, snap_id);
960 }
961
962 static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
963 u64 *snap_size)
964 {
965 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
966 if (snap_id == CEPH_NOSNAP) {
967 *snap_size = rbd_dev->header.image_size;
968 } else if (rbd_dev->image_format == 1) {
969 u32 which;
970
971 which = rbd_dev_snap_index(rbd_dev, snap_id);
972 if (which == BAD_SNAP_INDEX)
973 return -ENOENT;
974
975 *snap_size = rbd_dev->header.snap_sizes[which];
976 } else {
977 u64 size = 0;
978 int ret;
979
980 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
981 if (ret)
982 return ret;
983
984 *snap_size = size;
985 }
986 return 0;
987 }
988
989 static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
990 u64 *snap_features)
991 {
992 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
993 if (snap_id == CEPH_NOSNAP) {
994 *snap_features = rbd_dev->header.features;
995 } else if (rbd_dev->image_format == 1) {
996 *snap_features = 0; /* No features for format 1 */
997 } else {
998 u64 features = 0;
999 int ret;
1000
1001 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
1002 if (ret)
1003 return ret;
1004
1005 *snap_features = features;
1006 }
1007 return 0;
1008 }
1009
1010 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
1011 {
1012 u64 snap_id = rbd_dev->spec->snap_id;
1013 u64 size = 0;
1014 u64 features = 0;
1015 int ret;
1016
1017 ret = rbd_snap_size(rbd_dev, snap_id, &size);
1018 if (ret)
1019 return ret;
1020 ret = rbd_snap_features(rbd_dev, snap_id, &features);
1021 if (ret)
1022 return ret;
1023
1024 rbd_dev->mapping.size = size;
1025 rbd_dev->mapping.features = features;
1026
1027 return 0;
1028 }
1029
1030 static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1031 {
1032 rbd_dev->mapping.size = 0;
1033 rbd_dev->mapping.features = 0;
1034 }
1035
1036 static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
1037 {
1038 char *name;
1039 u64 segment;
1040 int ret;
1041 char *name_format;
1042
1043 name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
1044 if (!name)
1045 return NULL;
1046 segment = offset >> rbd_dev->header.obj_order;
1047 name_format = "%s.%012llx";
1048 if (rbd_dev->image_format == 2)
1049 name_format = "%s.%016llx";
1050 ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
1051 rbd_dev->header.object_prefix, segment);
1052 if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
1053 pr_err("error formatting segment name for #%llu (%d)\n",
1054 segment, ret);
1055 kmem_cache_free(rbd_segment_name_cache, name); /* allocated from the cache, not kmalloc'ed */
1056 name = NULL;
1057 }
1058
1059 return name;
1060 }
1061
1062 static void rbd_segment_name_free(const char *name)
1063 {
1064 /* The explicit cast here is needed to drop the const qualifier */
1065
1066 kmem_cache_free(rbd_segment_name_cache, (void *)name);
1067 }
1068
1069 static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
1070 {
1071 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1072
1073 return offset & (segment_size - 1);
1074 }
1075
1076 static u64 rbd_segment_length(struct rbd_device *rbd_dev,
1077 u64 offset, u64 length)
1078 {
1079 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1080
1081 offset &= segment_size - 1;
1082
1083 rbd_assert(length <= U64_MAX - offset);
1084 if (offset + length > segment_size)
1085 length = segment_size - offset;
1086
1087 return length;
1088 }
1089
1090 /*
1091 * returns the size of an object in the image
1092 */
1093 static u64 rbd_obj_bytes(struct rbd_image_header *header)
1094 {
1095 return 1 << header->obj_order;
1096 }
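/*
 * Worked example (illustration only, assuming the common obj_order of 22,
 * i.e. 4 MiB objects): an image byte offset of 10 MiB falls in segment
 * 10 MiB >> 22 == 2, so for a format 1 image rbd_segment_name() yields
 * "<object_prefix>.000000000002".  rbd_segment_offset() gives
 * 10 MiB & (4 MiB - 1) == 2 MiB, and a 3 MiB request starting there is
 * clipped by rbd_segment_length() to the 2 MiB that remain in the object.
 */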
1097
1098 /*
1099 * bio helpers
1100 */
1101
1102 static void bio_chain_put(struct bio *chain)
1103 {
1104 struct bio *tmp;
1105
1106 while (chain) {
1107 tmp = chain;
1108 chain = chain->bi_next;
1109 bio_put(tmp);
1110 }
1111 }
1112
1113 /*
1114 * zeros a bio chain, starting at a specific offset
1115 */
1116 static void zero_bio_chain(struct bio *chain, int start_ofs)
1117 {
1118 struct bio_vec *bv;
1119 unsigned long flags;
1120 void *buf;
1121 int i;
1122 int pos = 0;
1123
1124 while (chain) {
1125 bio_for_each_segment(bv, chain, i) {
1126 if (pos + bv->bv_len > start_ofs) {
1127 int remainder = max(start_ofs - pos, 0);
1128 buf = bvec_kmap_irq(bv, &flags);
1129 memset(buf + remainder, 0,
1130 bv->bv_len - remainder);
1131 flush_dcache_page(bv->bv_page);
1132 bvec_kunmap_irq(buf, &flags);
1133 }
1134 pos += bv->bv_len;
1135 }
1136
1137 chain = chain->bi_next;
1138 }
1139 }
1140
1141 /*
1142 * similar to zero_bio_chain(), zeros data defined by a page array,
1143 * starting at the given byte offset from the start of the array and
1144 * continuing up to the given end offset. The pages array is
1145 * assumed to be big enough to hold all bytes up to the end.
1146 */
1147 static void zero_pages(struct page **pages, u64 offset, u64 end)
1148 {
1149 struct page **page = &pages[offset >> PAGE_SHIFT];
1150
1151 rbd_assert(end > offset);
1152 rbd_assert(end - offset <= (u64)SIZE_MAX);
1153 while (offset < end) {
1154 size_t page_offset;
1155 size_t length;
1156 unsigned long flags;
1157 void *kaddr;
1158
1159 page_offset = (size_t)(offset & ~PAGE_MASK);
1160 length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
1161 local_irq_save(flags);
1162 kaddr = kmap_atomic(*page);
1163 memset(kaddr + page_offset, 0, length);
1164 flush_dcache_page(*page);
1165 kunmap_atomic(kaddr);
1166 local_irq_restore(flags);
1167
1168 offset += length;
1169 page++;
1170 }
1171 }
1172
1173 /*
1174 * Clone a portion of a bio, starting at the given byte offset
1175 * and continuing for the number of bytes indicated.
1176 */
1177 static struct bio *bio_clone_range(struct bio *bio_src,
1178 unsigned int offset,
1179 unsigned int len,
1180 gfp_t gfpmask)
1181 {
1182 struct bio_vec *bv;
1183 unsigned int resid;
1184 unsigned short idx;
1185 unsigned int voff;
1186 unsigned short end_idx;
1187 unsigned short vcnt;
1188 struct bio *bio;
1189
1190 /* Handle the easy case for the caller */
1191
1192 if (!offset && len == bio_src->bi_size)
1193 return bio_clone(bio_src, gfpmask);
1194
1195 if (WARN_ON_ONCE(!len))
1196 return NULL;
1197 if (WARN_ON_ONCE(len > bio_src->bi_size))
1198 return NULL;
1199 if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
1200 return NULL;
1201
1202 /* Find first affected segment... */
1203
1204 resid = offset;
1205 bio_for_each_segment(bv, bio_src, idx) {
1206 if (resid < bv->bv_len)
1207 break;
1208 resid -= bv->bv_len;
1209 }
1210 voff = resid;
1211
1212 /* ...and the last affected segment */
1213
1214 resid += len;
1215 __bio_for_each_segment(bv, bio_src, end_idx, idx) {
1216 if (resid <= bv->bv_len)
1217 break;
1218 resid -= bv->bv_len;
1219 }
1220 vcnt = end_idx - idx + 1;
1221
1222 /* Build the clone */
1223
1224 bio = bio_alloc(gfpmask, (unsigned int) vcnt);
1225 if (!bio)
1226 return NULL; /* ENOMEM */
1227
1228 bio->bi_bdev = bio_src->bi_bdev;
1229 bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
1230 bio->bi_rw = bio_src->bi_rw;
1231 bio->bi_flags |= 1 << BIO_CLONED;
1232
1233 /*
1234 * Copy over our part of the bio_vec, then update the first
1235 * and last (or only) entries.
1236 */
1237 memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
1238 vcnt * sizeof (struct bio_vec));
1239 bio->bi_io_vec[0].bv_offset += voff;
1240 if (vcnt > 1) {
1241 bio->bi_io_vec[0].bv_len -= voff;
1242 bio->bi_io_vec[vcnt - 1].bv_len = resid;
1243 } else {
1244 bio->bi_io_vec[0].bv_len = len;
1245 }
1246
1247 bio->bi_vcnt = vcnt;
1248 bio->bi_size = len;
1249 bio->bi_idx = 0;
1250
1251 return bio;
1252 }
1253
1254 /*
1255 * Clone a portion of a bio chain, starting at the given byte offset
1256 * into the first bio in the source chain and continuing for the
1257 * number of bytes indicated. The result is another bio chain of
1258 * exactly the given length, or a null pointer on error.
1259 *
1260 * The bio_src and offset parameters are both in-out. On entry they
1261 * refer to the first source bio and the offset into that bio where
1262 * the start of data to be cloned is located.
1263 *
1264 * On return, bio_src is updated to refer to the bio in the source
1265 * chain that contains the first un-cloned byte, and *offset will
1266 * contain the offset of that byte within that bio.
1267 */
1268 static struct bio *bio_chain_clone_range(struct bio **bio_src,
1269 unsigned int *offset,
1270 unsigned int len,
1271 gfp_t gfpmask)
1272 {
1273 struct bio *bi = *bio_src;
1274 unsigned int off = *offset;
1275 struct bio *chain = NULL;
1276 struct bio **end;
1277
1278 /* Build up a chain of clone bios up to the limit */
1279
1280 if (!bi || off >= bi->bi_size || !len)
1281 return NULL; /* Nothing to clone */
1282
1283 end = &chain;
1284 while (len) {
1285 unsigned int bi_size;
1286 struct bio *bio;
1287
1288 if (!bi) {
1289 rbd_warn(NULL, "bio_chain exhausted with %u left", len);
1290 goto out_err; /* EINVAL; ran out of bio's */
1291 }
1292 bi_size = min_t(unsigned int, bi->bi_size - off, len);
1293 bio = bio_clone_range(bi, off, bi_size, gfpmask);
1294 if (!bio)
1295 goto out_err; /* ENOMEM */
1296
1297 *end = bio;
1298 end = &bio->bi_next;
1299
1300 off += bi_size;
1301 if (off == bi->bi_size) {
1302 bi = bi->bi_next;
1303 off = 0;
1304 }
1305 len -= bi_size;
1306 }
1307 *bio_src = bi;
1308 *offset = off;
1309
1310 return chain;
1311 out_err:
1312 bio_chain_put(chain);
1313
1314 return NULL;
1315 }
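/*
 * Worked example (illustration only): given a source chain of two 4 KiB
 * bios, calling bio_chain_clone_range() with *offset == 1 KiB and
 * len == 5 KiB produces a clone chain covering the last 3 KiB of the first
 * bio and the first 2 KiB of the second.  On return *bio_src points at the
 * second source bio and *offset == 2 KiB, so a follow-up call continues
 * exactly where this one left off.
 */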
1316
1317 /*
1318 * The default/initial value for all object request flags is 0. For
1319 * each flag, once its value is set to 1 it is never reset to 0
1320 * again.
1321 */
1322 static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
1323 {
1324 if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
1325 struct rbd_device *rbd_dev;
1326
1327 rbd_dev = obj_request->img_request->rbd_dev;
1328 rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
1329 obj_request);
1330 }
1331 }
1332
1333 static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
1334 {
1335 smp_mb();
1336 return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
1337 }
1338
1339 static void obj_request_done_set(struct rbd_obj_request *obj_request)
1340 {
1341 if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
1342 struct rbd_device *rbd_dev = NULL;
1343
1344 if (obj_request_img_data_test(obj_request))
1345 rbd_dev = obj_request->img_request->rbd_dev;
1346 rbd_warn(rbd_dev, "obj_request %p already marked done\n",
1347 obj_request);
1348 }
1349 }
1350
1351 static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1352 {
1353 smp_mb();
1354 return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
1355 }
1356
1357 /*
1358 * This sets the KNOWN flag after (possibly) setting the EXISTS
1359 * flag. The latter is set based on the "exists" value provided.
1360 *
1361 * Note that for our purposes once an object exists it never goes
1362 * away again. It's possible that the responses to two existence
1363 * checks are separated by the creation of the target object, and
1364 * the first ("doesn't exist") response arrives *after* the second
1365 * ("does exist"). In that case we ignore the second one.
1366 */
1367 static void obj_request_existence_set(struct rbd_obj_request *obj_request,
1368 bool exists)
1369 {
1370 if (exists)
1371 set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
1372 set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
1373 smp_mb();
1374 }
1375
1376 static bool obj_request_known_test(struct rbd_obj_request *obj_request)
1377 {
1378 smp_mb();
1379 return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
1380 }
1381
1382 static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
1383 {
1384 smp_mb();
1385 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
1386 }
1387
1388 static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1389 {
1390 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1391 atomic_read(&obj_request->kref.refcount));
1392 kref_get(&obj_request->kref);
1393 }
1394
1395 static void rbd_obj_request_destroy(struct kref *kref);
1396 static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1397 {
1398 rbd_assert(obj_request != NULL);
1399 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1400 atomic_read(&obj_request->kref.refcount));
1401 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1402 }
1403
1404 static bool img_request_child_test(struct rbd_img_request *img_request);
1405 static void rbd_parent_request_destroy(struct kref *kref);
1406 static void rbd_img_request_destroy(struct kref *kref);
1407 static void rbd_img_request_put(struct rbd_img_request *img_request)
1408 {
1409 rbd_assert(img_request != NULL);
1410 dout("%s: img %p (was %d)\n", __func__, img_request,
1411 atomic_read(&img_request->kref.refcount));
1412 if (img_request_child_test(img_request))
1413 kref_put(&img_request->kref, rbd_parent_request_destroy);
1414 else
1415 kref_put(&img_request->kref, rbd_img_request_destroy);
1416 }
1417
1418 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1419 struct rbd_obj_request *obj_request)
1420 {
1421 rbd_assert(obj_request->img_request == NULL);
1422
1423 /* Image request now owns object's original reference */
1424 obj_request->img_request = img_request;
1425 obj_request->which = img_request->obj_request_count;
1426 rbd_assert(!obj_request_img_data_test(obj_request));
1427 obj_request_img_data_set(obj_request);
1428 rbd_assert(obj_request->which != BAD_WHICH);
1429 img_request->obj_request_count++;
1430 list_add_tail(&obj_request->links, &img_request->obj_requests);
1431 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1432 obj_request->which);
1433 }
1434
1435 static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1436 struct rbd_obj_request *obj_request)
1437 {
1438 rbd_assert(obj_request->which != BAD_WHICH);
1439
1440 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1441 obj_request->which);
1442 list_del(&obj_request->links);
1443 rbd_assert(img_request->obj_request_count > 0);
1444 img_request->obj_request_count--;
1445 rbd_assert(obj_request->which == img_request->obj_request_count);
1446 obj_request->which = BAD_WHICH;
1447 rbd_assert(obj_request_img_data_test(obj_request));
1448 rbd_assert(obj_request->img_request == img_request);
1449 obj_request->img_request = NULL;
1450 obj_request->callback = NULL;
1451 rbd_obj_request_put(obj_request);
1452 }
1453
1454 static bool obj_request_type_valid(enum obj_request_type type)
1455 {
1456 switch (type) {
1457 case OBJ_REQUEST_NODATA:
1458 case OBJ_REQUEST_BIO:
1459 case OBJ_REQUEST_PAGES:
1460 return true;
1461 default:
1462 return false;
1463 }
1464 }
1465
1466 static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1467 struct rbd_obj_request *obj_request)
1468 {
1469 dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);
1470
1471 return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1472 }
1473
1474 static void rbd_img_request_complete(struct rbd_img_request *img_request)
1475 {
1476
1477 dout("%s: img %p\n", __func__, img_request);
1478
1479 /*
1480 * If no error occurred, compute the aggregate transfer
1481 * count for the image request. We could instead use
1482 * atomic64_cmpxchg() to update it as each object request
1483 * completes; not clear which way is better off hand.
1484 */
1485 if (!img_request->result) {
1486 struct rbd_obj_request *obj_request;
1487 u64 xferred = 0;
1488
1489 for_each_obj_request(img_request, obj_request)
1490 xferred += obj_request->xferred;
1491 img_request->xferred = xferred;
1492 }
1493
1494 if (img_request->callback)
1495 img_request->callback(img_request);
1496 else
1497 rbd_img_request_put(img_request);
1498 }
1499
1500 /* Caller is responsible for rbd_obj_request_destroy(obj_request) */
1501
1502 static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1503 {
1504 dout("%s: obj %p\n", __func__, obj_request);
1505
1506 return wait_for_completion_interruptible(&obj_request->completion);
1507 }
1508
1509 /*
1510 * The default/initial value for all image request flags is 0. Each
1511 * is conditionally set to 1 at image request initialization time
1512 * and currently never changes thereafter.
1513 */
1514 static void img_request_write_set(struct rbd_img_request *img_request)
1515 {
1516 set_bit(IMG_REQ_WRITE, &img_request->flags);
1517 smp_mb();
1518 }
1519
1520 static bool img_request_write_test(struct rbd_img_request *img_request)
1521 {
1522 smp_mb();
1523 return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
1524 }
1525
1526 static void img_request_child_set(struct rbd_img_request *img_request)
1527 {
1528 set_bit(IMG_REQ_CHILD, &img_request->flags);
1529 smp_mb();
1530 }
1531
1532 static void img_request_child_clear(struct rbd_img_request *img_request)
1533 {
1534 clear_bit(IMG_REQ_CHILD, &img_request->flags);
1535 smp_mb();
1536 }
1537
1538 static bool img_request_child_test(struct rbd_img_request *img_request)
1539 {
1540 smp_mb();
1541 return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
1542 }
1543
1544 static void img_request_layered_set(struct rbd_img_request *img_request)
1545 {
1546 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1547 smp_mb();
1548 }
1549
1550 static void img_request_layered_clear(struct rbd_img_request *img_request)
1551 {
1552 clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1553 smp_mb();
1554 }
1555
1556 static bool img_request_layered_test(struct rbd_img_request *img_request)
1557 {
1558 smp_mb();
1559 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1560 }
1561
1562 static void
1563 rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1564 {
1565 u64 xferred = obj_request->xferred;
1566 u64 length = obj_request->length;
1567
1568 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1569 obj_request, obj_request->img_request, obj_request->result,
1570 xferred, length);
1571 /*
1572 * ENOENT means a hole in the image. We zero-fill the entire
1573 * length of the request. A short read also implies zero-fill
1574 * to the end of the request. An error requires the whole
1575 * length of the request to be reported finished with an error
1576 * to the block layer. In each case we update the xferred
1577 * count to indicate the whole request was satisfied.
1578 */
1579 rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
1580 if (obj_request->result == -ENOENT) {
1581 if (obj_request->type == OBJ_REQUEST_BIO)
1582 zero_bio_chain(obj_request->bio_list, 0);
1583 else
1584 zero_pages(obj_request->pages, 0, length);
1585 obj_request->result = 0;
1586 } else if (xferred < length && !obj_request->result) {
1587 if (obj_request->type == OBJ_REQUEST_BIO)
1588 zero_bio_chain(obj_request->bio_list, xferred);
1589 else
1590 zero_pages(obj_request->pages, xferred, length);
1591 }
1592 obj_request->xferred = length;
1593 obj_request_done_set(obj_request);
1594 }
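/*
 * Illustration (not part of the driver): for a read of a 4 MiB object,
 * -ENOENT (a hole in the image) zero-fills the whole 4 MiB, and a short
 * read of 1 MiB zero-fills the remaining 3 MiB; in both cases xferred is
 * reported as the full request length so the block layer sees the request
 * as completely satisfied.
 */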
1595
1596 static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1597 {
1598 dout("%s: obj %p cb %p\n", __func__, obj_request,
1599 obj_request->callback);
1600 if (obj_request->callback)
1601 obj_request->callback(obj_request);
1602 else
1603 complete_all(&obj_request->completion);
1604 }
1605
1606 static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
1607 {
1608 dout("%s: obj %p\n", __func__, obj_request);
1609 obj_request_done_set(obj_request);
1610 }
1611
1612 static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
1613 {
1614 struct rbd_img_request *img_request = NULL;
1615 struct rbd_device *rbd_dev = NULL;
1616 bool layered = false;
1617
1618 if (obj_request_img_data_test(obj_request)) {
1619 img_request = obj_request->img_request;
1620 layered = img_request && img_request_layered_test(img_request);
1621 rbd_dev = img_request->rbd_dev;
1622 }
1623
1624 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1625 obj_request, img_request, obj_request->result,
1626 obj_request->xferred, obj_request->length);
1627 if (layered && obj_request->result == -ENOENT &&
1628 obj_request->img_offset < rbd_dev->parent_overlap)
1629 rbd_img_parent_read(obj_request);
1630 else if (img_request)
1631 rbd_img_obj_request_read_callback(obj_request);
1632 else
1633 obj_request_done_set(obj_request);
1634 }
1635
1636 static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
1637 {
1638 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1639 obj_request->result, obj_request->length);
1640 /*
1641 * There is no such thing as a successful short write. Set
1642 * it to our originally-requested length.
1643 */
1644 obj_request->xferred = obj_request->length;
1645 obj_request_done_set(obj_request);
1646 }
1647
1648 /*
1649 * For a simple stat call there's nothing to do. We'll do more if
1650 * this is part of a write sequence for a layered image.
1651 */
1652 static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
1653 {
1654 dout("%s: obj %p\n", __func__, obj_request);
1655 obj_request_done_set(obj_request);
1656 }
1657
1658 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1659 struct ceph_msg *msg)
1660 {
1661 struct rbd_obj_request *obj_request = osd_req->r_priv;
1662 u16 opcode;
1663
1664 dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
1665 rbd_assert(osd_req == obj_request->osd_req);
1666 if (obj_request_img_data_test(obj_request)) {
1667 rbd_assert(obj_request->img_request);
1668 rbd_assert(obj_request->which != BAD_WHICH);
1669 } else {
1670 rbd_assert(obj_request->which == BAD_WHICH);
1671 }
1672
1673 if (osd_req->r_result < 0)
1674 obj_request->result = osd_req->r_result;
1675
1676 BUG_ON(osd_req->r_num_ops > 2);
1677
1678 /*
1679 * We support a 64-bit length, but ultimately it has to be
1680 * passed to blk_end_request(), which takes an unsigned int.
1681 */
1682 obj_request->xferred = osd_req->r_reply_op_len[0];
1683 rbd_assert(obj_request->xferred < (u64)UINT_MAX);
1684 opcode = osd_req->r_ops[0].op;
1685 switch (opcode) {
1686 case CEPH_OSD_OP_READ:
1687 rbd_osd_read_callback(obj_request);
1688 break;
1689 case CEPH_OSD_OP_WRITE:
1690 rbd_osd_write_callback(obj_request);
1691 break;
1692 case CEPH_OSD_OP_STAT:
1693 rbd_osd_stat_callback(obj_request);
1694 break;
1695 case CEPH_OSD_OP_CALL:
1696 case CEPH_OSD_OP_NOTIFY_ACK:
1697 case CEPH_OSD_OP_WATCH:
1698 rbd_osd_trivial_callback(obj_request);
1699 break;
1700 default:
1701 rbd_warn(NULL, "%s: unsupported op %hu\n",
1702 obj_request->object_name, (unsigned short) opcode);
1703 break;
1704 }
1705
1706 if (obj_request_done_test(obj_request))
1707 rbd_obj_request_complete(obj_request);
1708 }
1709
1710 static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
1711 {
1712 struct rbd_img_request *img_request = obj_request->img_request;
1713 struct ceph_osd_request *osd_req = obj_request->osd_req;
1714 u64 snap_id;
1715
1716 rbd_assert(osd_req != NULL);
1717
1718 snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
1719 ceph_osdc_build_request(osd_req, obj_request->offset,
1720 NULL, snap_id, NULL);
1721 }
1722
1723 static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1724 {
1725 struct rbd_img_request *img_request = obj_request->img_request;
1726 struct ceph_osd_request *osd_req = obj_request->osd_req;
1727 struct ceph_snap_context *snapc;
1728 struct timespec mtime = CURRENT_TIME;
1729
1730 rbd_assert(osd_req != NULL);
1731
1732 snapc = img_request ? img_request->snapc : NULL;
1733 ceph_osdc_build_request(osd_req, obj_request->offset,
1734 snapc, CEPH_NOSNAP, &mtime);
1735 }
1736
1737 static struct ceph_osd_request *rbd_osd_req_create(
1738 struct rbd_device *rbd_dev,
1739 bool write_request,
1740 struct rbd_obj_request *obj_request)
1741 {
1742 struct ceph_snap_context *snapc = NULL;
1743 struct ceph_osd_client *osdc;
1744 struct ceph_osd_request *osd_req;
1745
1746 if (obj_request_img_data_test(obj_request)) {
1747 struct rbd_img_request *img_request = obj_request->img_request;
1748
1749 rbd_assert(write_request ==
1750 img_request_write_test(img_request));
1751 if (write_request)
1752 snapc = img_request->snapc;
1753 }
1754
1755 /* Allocate and initialize the request, for the single op */
1756
1757 osdc = &rbd_dev->rbd_client->client->osdc;
1758 osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
1759 if (!osd_req)
1760 return NULL; /* ENOMEM */
1761
1762 if (write_request)
1763 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1764 else
1765 osd_req->r_flags = CEPH_OSD_FLAG_READ;
1766
1767 osd_req->r_callback = rbd_osd_req_callback;
1768 osd_req->r_priv = obj_request;
1769
1770 osd_req->r_oid_len = strlen(obj_request->object_name);
1771 rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
1772 memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
1773
1774 osd_req->r_file_layout = rbd_dev->layout; /* struct */
1775
1776 return osd_req;
1777 }
1778
1779 /*
1780 * Create a copyup osd request based on the information in the
1781 * object request supplied. A copyup request has two osd ops:
1782 * a copyup method call and a "normal" write request.
1783 */
1784 static struct ceph_osd_request *
1785 rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1786 {
1787 struct rbd_img_request *img_request;
1788 struct ceph_snap_context *snapc;
1789 struct rbd_device *rbd_dev;
1790 struct ceph_osd_client *osdc;
1791 struct ceph_osd_request *osd_req;
1792
1793 rbd_assert(obj_request_img_data_test(obj_request));
1794 img_request = obj_request->img_request;
1795 rbd_assert(img_request);
1796 rbd_assert(img_request_write_test(img_request));
1797
1798 /* Allocate and initialize the request, for the two ops */
1799
1800 snapc = img_request->snapc;
1801 rbd_dev = img_request->rbd_dev;
1802 osdc = &rbd_dev->rbd_client->client->osdc;
1803 osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
1804 if (!osd_req)
1805 return NULL; /* ENOMEM */
1806
1807 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1808 osd_req->r_callback = rbd_osd_req_callback;
1809 osd_req->r_priv = obj_request;
1810
1811 osd_req->r_oid_len = strlen(obj_request->object_name);
1812 rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
1813 memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
1814
1815 osd_req->r_file_layout = rbd_dev->layout; /* struct */
1816
1817 return osd_req;
1818 }
1819
1820
1821 static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
1822 {
1823 ceph_osdc_put_request(osd_req);
1824 }
1825
1826 /* object_name is assumed to be a non-null pointer and NUL-terminated */
1827
1828 static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
1829 u64 offset, u64 length,
1830 enum obj_request_type type)
1831 {
1832 struct rbd_obj_request *obj_request;
1833 size_t size;
1834 char *name;
1835
1836 rbd_assert(obj_request_type_valid(type));
1837
1838 size = strlen(object_name) + 1;
1839 name = kmalloc(size, GFP_KERNEL);
1840 if (!name)
1841 return NULL;
1842
1843 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
1844 if (!obj_request) {
1845 kfree(name);
1846 return NULL;
1847 }
1848
1849 obj_request->object_name = memcpy(name, object_name, size);
1850 obj_request->offset = offset;
1851 obj_request->length = length;
1852 obj_request->flags = 0;
1853 obj_request->which = BAD_WHICH;
1854 obj_request->type = type;
1855 INIT_LIST_HEAD(&obj_request->links);
1856 init_completion(&obj_request->completion);
1857 kref_init(&obj_request->kref);
1858
1859 dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
1860 offset, length, (int)type, obj_request);
1861
1862 return obj_request;
1863 }
1864
1865 static void rbd_obj_request_destroy(struct kref *kref)
1866 {
1867 struct rbd_obj_request *obj_request;
1868
1869 obj_request = container_of(kref, struct rbd_obj_request, kref);
1870
1871 dout("%s: obj %p\n", __func__, obj_request);
1872
1873 rbd_assert(obj_request->img_request == NULL);
1874 rbd_assert(obj_request->which == BAD_WHICH);
1875
1876 if (obj_request->osd_req)
1877 rbd_osd_req_destroy(obj_request->osd_req);
1878
1879 rbd_assert(obj_request_type_valid(obj_request->type));
1880 switch (obj_request->type) {
1881 case OBJ_REQUEST_NODATA:
1882 break; /* Nothing to do */
1883 case OBJ_REQUEST_BIO:
1884 if (obj_request->bio_list)
1885 bio_chain_put(obj_request->bio_list);
1886 break;
1887 case OBJ_REQUEST_PAGES:
1888 if (obj_request->pages)
1889 ceph_release_page_vector(obj_request->pages,
1890 obj_request->page_count);
1891 break;
1892 }
1893
1894 kfree(obj_request->object_name);
1895 obj_request->object_name = NULL;
1896 kmem_cache_free(rbd_obj_request_cache, obj_request);
1897 }
1898
1899 /* It's OK to call this for a device with no parent */
1900
1901 static void rbd_spec_put(struct rbd_spec *spec);
1902 static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1903 {
1904 rbd_dev_remove_parent(rbd_dev);
1905 rbd_spec_put(rbd_dev->parent_spec);
1906 rbd_dev->parent_spec = NULL;
1907 rbd_dev->parent_overlap = 0;
1908 }
1909
1910 /*
1911 * Parent image reference counting is used to determine when an
1912 * image's parent fields can be safely torn down--after there are no
1913 * more in-flight requests to the parent image. When the last
1914 * reference is dropped, cleaning them up is safe.
1915 */
1916 static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
1917 {
1918 int counter;
1919
1920 if (!rbd_dev->parent_spec)
1921 return;
1922
1923 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
1924 if (counter > 0)
1925 return;
1926
1927 /* Last reference; clean up parent data structures */
1928
1929 if (!counter)
1930 rbd_dev_unparent(rbd_dev);
1931 else
1932 rbd_warn(rbd_dev, "parent reference underflow\n");
1933 }
1934
1935 /*
1936 * If an image has a non-zero parent overlap, get a reference to its
1937 * parent.
1938 *
1939 * We must get the reference before checking for the overlap to
1940 * coordinate properly with zeroing the parent overlap in
1941 * rbd_dev_v2_parent_info() when an image gets flattened. We
1942 * drop it again if there is no overlap.
1943 *
1944 * Returns true if the rbd device has a parent with a non-zero
1945 * overlap and a reference for it was successfully taken, or
1946 * false otherwise.
1947 */
1948 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1949 {
1950 int counter;
1951
1952 if (!rbd_dev->parent_spec)
1953 return false;
1954
1955 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
1956 if (counter > 0 && rbd_dev->parent_overlap)
1957 return true;
1958
1959 /* Image was flattened, but parent is not yet torn down */
1960
1961 if (counter < 0)
1962 rbd_warn(rbd_dev, "parent reference overflow\n");
1963
1964 return false;
1965 }
1966
1967 /*
1968 * Caller is responsible for filling in the list of object requests
1969 * that comprises the image request, and the Linux request pointer
1970 * (if there is one).
1971 */
1972 static struct rbd_img_request *rbd_img_request_create(
1973 struct rbd_device *rbd_dev,
1974 u64 offset, u64 length,
1975 bool write_request)
1976 {
1977 struct rbd_img_request *img_request;
1978
1979 img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
1980 if (!img_request)
1981 return NULL;
1982
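	/*
	 * For a write request, take a reference on the current snapshot
	 * context; it is dropped in rbd_img_request_destroy() once the
	 * image request is done with it.
	 */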
1983 if (write_request) {
1984 down_read(&rbd_dev->header_rwsem);
1985 ceph_get_snap_context(rbd_dev->header.snapc);
1986 up_read(&rbd_dev->header_rwsem);
1987 }
1988
1989 img_request->rq = NULL;
1990 img_request->rbd_dev = rbd_dev;
1991 img_request->offset = offset;
1992 img_request->length = length;
1993 img_request->flags = 0;
1994 if (write_request) {
1995 img_request_write_set(img_request);
1996 img_request->snapc = rbd_dev->header.snapc;
1997 } else {
1998 img_request->snap_id = rbd_dev->spec->snap_id;
1999 }
2000 if (rbd_dev_parent_get(rbd_dev))
2001 img_request_layered_set(img_request);
2002 spin_lock_init(&img_request->completion_lock);
2003 img_request->next_completion = 0;
2004 img_request->callback = NULL;
2005 img_request->result = 0;
2006 img_request->obj_request_count = 0;
2007 INIT_LIST_HEAD(&img_request->obj_requests);
2008 kref_init(&img_request->kref);
2009
2010 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
2011 write_request ? "write" : "read", offset, length,
2012 img_request);
2013
2014 return img_request;
2015 }
2016
2017 static void rbd_img_request_destroy(struct kref *kref)
2018 {
2019 struct rbd_img_request *img_request;
2020 struct rbd_obj_request *obj_request;
2021 struct rbd_obj_request *next_obj_request;
2022
2023 img_request = container_of(kref, struct rbd_img_request, kref);
2024
2025 dout("%s: img %p\n", __func__, img_request);
2026
2027 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2028 rbd_img_obj_request_del(img_request, obj_request);
2029 rbd_assert(img_request->obj_request_count == 0);
2030
2031 if (img_request_layered_test(img_request)) {
2032 img_request_layered_clear(img_request);
2033 rbd_dev_parent_put(img_request->rbd_dev);
2034 }
2035
2036 if (img_request_write_test(img_request))
2037 ceph_put_snap_context(img_request->snapc);
2038
2039 kmem_cache_free(rbd_img_request_cache, img_request);
2040 }
2041
2042 static struct rbd_img_request *rbd_parent_request_create(
2043 struct rbd_obj_request *obj_request,
2044 u64 img_offset, u64 length)
2045 {
2046 struct rbd_img_request *parent_request;
2047 struct rbd_device *rbd_dev;
2048
2049 rbd_assert(obj_request->img_request);
2050 rbd_dev = obj_request->img_request->rbd_dev;
2051
2052 parent_request = rbd_img_request_create(rbd_dev->parent,
2053 img_offset, length, false);
2054 if (!parent_request)
2055 return NULL;
2056
2057 img_request_child_set(parent_request);
2058 rbd_obj_request_get(obj_request);
2059 parent_request->obj_request = obj_request;
2060
2061 return parent_request;
2062 }
2063
2064 static void rbd_parent_request_destroy(struct kref *kref)
2065 {
2066 struct rbd_img_request *parent_request;
2067 struct rbd_obj_request *orig_request;
2068
2069 parent_request = container_of(kref, struct rbd_img_request, kref);
2070 orig_request = parent_request->obj_request;
2071
2072 parent_request->obj_request = NULL;
2073 rbd_obj_request_put(orig_request);
2074 img_request_child_clear(parent_request);
2075
2076 rbd_img_request_destroy(kref);
2077 }
2078
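/*
 * Finish one object request belonging to an image request, recording
 * any error in the image request.  Returns true if the enclosing
 * request (block request or child image request) has more left to
 * complete.
 */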
2079 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2080 {
2081 struct rbd_img_request *img_request;
2082 unsigned int xferred;
2083 int result;
2084 bool more;
2085
2086 rbd_assert(obj_request_img_data_test(obj_request));
2087 img_request = obj_request->img_request;
2088
2089 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2090 xferred = (unsigned int)obj_request->xferred;
2091 result = obj_request->result;
2092 if (result) {
2093 struct rbd_device *rbd_dev = img_request->rbd_dev;
2094
2095 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
2096 img_request_write_test(img_request) ? "write" : "read",
2097 obj_request->length, obj_request->img_offset,
2098 obj_request->offset);
2099 rbd_warn(rbd_dev, " result %d xferred %x\n",
2100 result, xferred);
2101 if (!img_request->result)
2102 img_request->result = result;
2103 }
2104
2105 /* Image object requests don't own their page array */
2106
2107 if (obj_request->type == OBJ_REQUEST_PAGES) {
2108 obj_request->pages = NULL;
2109 obj_request->page_count = 0;
2110 }
2111
2112 if (img_request_child_test(img_request)) {
2113 rbd_assert(img_request->obj_request != NULL);
2114 more = obj_request->which < img_request->obj_request_count - 1;
2115 } else {
2116 rbd_assert(img_request->rq != NULL);
2117 more = blk_end_request(img_request->rq, result, xferred);
2118 }
2119
2120 return more;
2121 }
2122
2123 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2124 {
2125 struct rbd_img_request *img_request;
2126 u32 which = obj_request->which;
2127 bool more = true;
2128
2129 rbd_assert(obj_request_img_data_test(obj_request));
2130 img_request = obj_request->img_request;
2131
2132 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2133 rbd_assert(img_request != NULL);
2134 rbd_assert(img_request->obj_request_count > 0);
2135 rbd_assert(which != BAD_WHICH);
2136 rbd_assert(which < img_request->obj_request_count);
2137 rbd_assert(which >= img_request->next_completion);
2138
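	/*
	 * Object requests may complete out of order.  Under the
	 * completion lock, advance next_completion across the longest
	 * contiguous run of completed requests so the block layer only
	 * ever sees completions in order.
	 */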
2139 spin_lock_irq(&img_request->completion_lock);
2140 if (which != img_request->next_completion)
2141 goto out;
2142
2143 for_each_obj_request_from(img_request, obj_request) {
2144 rbd_assert(more);
2145 rbd_assert(which < img_request->obj_request_count);
2146
2147 if (!obj_request_done_test(obj_request))
2148 break;
2149 more = rbd_img_obj_end_request(obj_request);
2150 which++;
2151 }
2152
2153 rbd_assert(more ^ (which == img_request->obj_request_count));
2154 img_request->next_completion = which;
2155 out:
2156 spin_unlock_irq(&img_request->completion_lock);
2157
2158 if (!more)
2159 rbd_img_request_complete(img_request);
2160 }
2161
2162 /*
2163 * Split up an image request into one or more object requests, each
2164 * to a different object. The "type" parameter indicates whether
2165 * "data_desc" is the pointer to the head of a list of bio
2166 * structures, or the base of a page array. In either case this
2167 * function assumes data_desc describes memory sufficient to hold
2168 * all data described by the image request.
2169 */
2170 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2171 enum obj_request_type type,
2172 void *data_desc)
2173 {
2174 struct rbd_device *rbd_dev = img_request->rbd_dev;
2175 struct rbd_obj_request *obj_request = NULL;
2176 struct rbd_obj_request *next_obj_request;
2177 bool write_request = img_request_write_test(img_request);
2178 	struct bio *bio_list = NULL;
2179 	unsigned int bio_offset = 0;
2180 	struct page **pages = NULL;
2181 u64 img_offset;
2182 u64 resid;
2183 u16 opcode;
2184
2185 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2186 (int)type, data_desc);
2187
2188 opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
2189 img_offset = img_request->offset;
2190 resid = img_request->length;
2191 rbd_assert(resid > 0);
2192
2193 if (type == OBJ_REQUEST_BIO) {
2194 bio_list = data_desc;
2195 rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
2196 } else {
2197 rbd_assert(type == OBJ_REQUEST_PAGES);
2198 pages = data_desc;
2199 }
2200
2201 while (resid) {
2202 struct ceph_osd_request *osd_req;
2203 const char *object_name;
2204 u64 offset;
2205 u64 length;
2206
2207 object_name = rbd_segment_name(rbd_dev, img_offset);
2208 if (!object_name)
2209 goto out_unwind;
2210 offset = rbd_segment_offset(rbd_dev, img_offset);
2211 length = rbd_segment_length(rbd_dev, img_offset, resid);
2212 obj_request = rbd_obj_request_create(object_name,
2213 offset, length, type);
2214 /* object request has its own copy of the object name */
2215 rbd_segment_name_free(object_name);
2216 if (!obj_request)
2217 goto out_unwind;
2218 /*
2219 * set obj_request->img_request before creating the
2220 * osd_request so that it gets the right snapc
2221 */
2222 rbd_img_obj_request_add(img_request, obj_request);
2223
2224 if (type == OBJ_REQUEST_BIO) {
2225 unsigned int clone_size;
2226
2227 rbd_assert(length <= (u64)UINT_MAX);
2228 clone_size = (unsigned int)length;
2229 obj_request->bio_list =
2230 bio_chain_clone_range(&bio_list,
2231 &bio_offset,
2232 clone_size,
2233 GFP_ATOMIC);
2234 if (!obj_request->bio_list)
2235 				goto out_unwind;
2236 } else {
2237 unsigned int page_count;
2238
2239 obj_request->pages = pages;
2240 page_count = (u32)calc_pages_for(offset, length);
2241 obj_request->page_count = page_count;
2242 if ((offset + length) & ~PAGE_MASK)
2243 page_count--; /* more on last page */
2244 pages += page_count;
2245 }
2246
2247 osd_req = rbd_osd_req_create(rbd_dev, write_request,
2248 obj_request);
2249 if (!osd_req)
2250 			goto out_unwind;
2251 obj_request->osd_req = osd_req;
2252 obj_request->callback = rbd_img_obj_callback;
2253
2254 osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
2255 0, 0);
2256 if (type == OBJ_REQUEST_BIO)
2257 osd_req_op_extent_osd_data_bio(osd_req, 0,
2258 obj_request->bio_list, length);
2259 else
2260 osd_req_op_extent_osd_data_pages(osd_req, 0,
2261 obj_request->pages, length,
2262 offset & ~PAGE_MASK, false, false);
2263
2264 if (write_request)
2265 rbd_osd_req_format_write(obj_request);
2266 else
2267 rbd_osd_req_format_read(obj_request);
2268
2269 obj_request->img_offset = img_offset;
2270
2271 img_offset += length;
2272 resid -= length;
2273 }
2274
2275 return 0;
2276
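	/*
	 * Each object request created above, including the one that just
	 * failed, has already been added to the image request; deleting
	 * them from it is the only unwinding needed here.
	 */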
2279 out_unwind:
2280 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2281 rbd_img_obj_request_del(img_request, obj_request);
2282
2283 return -ENOMEM;
2284 }
2285
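/*
 * Completion callback for the copyup request built in
 * rbd_img_obj_parent_read_full_callback().  Release the page vector
 * that held the parent data, then finish the request like a normal
 * image object request.
 */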
2286 static void
2287 rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2288 {
2289 struct rbd_img_request *img_request;
2290 struct rbd_device *rbd_dev;
2291 struct page **pages;
2292 u32 page_count;
2293
2294 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2295 rbd_assert(obj_request_img_data_test(obj_request));
2296 img_request = obj_request->img_request;
2297 rbd_assert(img_request);
2298
2299 rbd_dev = img_request->rbd_dev;
2300 rbd_assert(rbd_dev);
2301
2302 pages = obj_request->copyup_pages;
2303 rbd_assert(pages != NULL);
2304 obj_request->copyup_pages = NULL;
2305 page_count = obj_request->copyup_page_count;
2306 rbd_assert(page_count);
2307 obj_request->copyup_page_count = 0;
2308 ceph_release_page_vector(pages, page_count);
2309
2310 /*
2311 * We want the transfer count to reflect the size of the
2312 * original write request. There is no such thing as a
2313 * successful short write, so if the request was successful
2314 * we can just set it to the originally-requested length.
2315 */
2316 if (!obj_request->result)
2317 obj_request->xferred = obj_request->length;
2318
2319 /* Finish up with the normal image object callback */
2320
2321 rbd_img_obj_callback(obj_request);
2322 }
2323
2324 static void
2325 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2326 {
2327 struct rbd_obj_request *orig_request;
2328 struct ceph_osd_request *osd_req;
2329 struct ceph_osd_client *osdc;
2330 struct rbd_device *rbd_dev;
2331 struct page **pages;
2332 u32 page_count;
2333 int img_result;
2334 u64 parent_length;
2335 u64 offset;
2336 u64 length;
2337
2338 rbd_assert(img_request_child_test(img_request));
2339
2340 /* First get what we need from the image request */
2341
2342 pages = img_request->copyup_pages;
2343 rbd_assert(pages != NULL);
2344 img_request->copyup_pages = NULL;
2345 page_count = img_request->copyup_page_count;
2346 rbd_assert(page_count);
2347 img_request->copyup_page_count = 0;
2348
2349 orig_request = img_request->obj_request;
2350 rbd_assert(orig_request != NULL);
2351 rbd_assert(obj_request_type_valid(orig_request->type));
2352 img_result = img_request->result;
2353 parent_length = img_request->length;
2354 rbd_assert(parent_length == img_request->xferred);
2355 rbd_img_request_put(img_request);
2356
2357 rbd_assert(orig_request->img_request);
2358 rbd_dev = orig_request->img_request->rbd_dev;
2359 rbd_assert(rbd_dev);
2360
2361 /*
2362 * If the overlap has become 0 (most likely because the
2363 * image has been flattened) we need to free the pages
2364 * and re-submit the original write request.
2365 */
2366 if (!rbd_dev->parent_overlap) {
2367 struct ceph_osd_client *osdc;
2368
2369 ceph_release_page_vector(pages, page_count);
2370 osdc = &rbd_dev->rbd_client->client->osdc;
2371 img_result = rbd_obj_request_submit(osdc, orig_request);
2372 if (!img_result)
2373 return;
2374 }
2375
2376 if (img_result)
2377 goto out_err;
2378
2379 /*
2380 	 * The original osd request is of no use to us any more.
2381 * We need a new one that can hold the two ops in a copyup
2382 * request. Allocate the new copyup osd request for the
2383 * original request, and release the old one.
2384 */
2385 img_result = -ENOMEM;
2386 osd_req = rbd_osd_req_create_copyup(orig_request);
2387 if (!osd_req)
2388 goto out_err;
2389 rbd_osd_req_destroy(orig_request->osd_req);
2390 orig_request->osd_req = osd_req;
2391 orig_request->copyup_pages = pages;
2392 orig_request->copyup_page_count = page_count;
2393
2394 /* Initialize the copyup op */
2395
2396 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2397 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2398 false, false);
2399
2400 /* Then the original write request op */
2401
2402 offset = orig_request->offset;
2403 length = orig_request->length;
2404 osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2405 offset, length, 0, 0);
2406 if (orig_request->type == OBJ_REQUEST_BIO)
2407 osd_req_op_extent_osd_data_bio(osd_req, 1,
2408 orig_request->bio_list, length);
2409 else
2410 osd_req_op_extent_osd_data_pages(osd_req, 1,
2411 orig_request->pages, length,
2412 offset & ~PAGE_MASK, false, false);
2413
2414 rbd_osd_req_format_write(orig_request);
2415
2416 /* All set, send it off. */
2417
2418 orig_request->callback = rbd_img_obj_copyup_callback;
2419 osdc = &rbd_dev->rbd_client->client->osdc;
2420 img_result = rbd_obj_request_submit(osdc, orig_request);
2421 if (!img_result)
2422 return;
2423 out_err:
2424 /* Record the error code and complete the request */
2425
2426 orig_request->result = img_result;
2427 orig_request->xferred = 0;
2428 obj_request_done_set(orig_request);
2429 rbd_obj_request_complete(orig_request);
2430 }
2431
2432 /*
2433 * Read from the parent image the range of data that covers the
2434 * entire target of the given object request. This is used for
2435 * satisfying a layered image write request when the target of an
2436 * object request from the image request does not exist.
2437 *
2438 * A page array big enough to hold the returned data is allocated
2439 * and supplied to rbd_img_request_fill() as the "data descriptor."
2440 * When the read completes, this page array will be transferred to
2441 * the original object request for the copyup operation.
2442 *
2443 * If an error occurs, record it as the result of the original
2444 * object request and mark it done so it gets completed.
2445 */
2446 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2447 {
2448 struct rbd_img_request *img_request = NULL;
2449 struct rbd_img_request *parent_request = NULL;
2450 struct rbd_device *rbd_dev;
2451 u64 img_offset;
2452 u64 length;
2453 struct page **pages = NULL;
2454 u32 page_count;
2455 int result;
2456
2457 rbd_assert(obj_request_img_data_test(obj_request));
2458 rbd_assert(obj_request_type_valid(obj_request->type));
2459
2460 img_request = obj_request->img_request;
2461 rbd_assert(img_request != NULL);
2462 rbd_dev = img_request->rbd_dev;
2463 rbd_assert(rbd_dev->parent != NULL);
2464
2465 /*
2466 * Determine the byte range covered by the object in the
2467 * child image to which the original request was to be sent.
2468 */
2469 img_offset = obj_request->img_offset - obj_request->offset;
2470 length = (u64)1 << rbd_dev->header.obj_order;
2471
2472 /*
2473 * There is no defined parent data beyond the parent
2474 * overlap, so limit what we read at that boundary if
2475 * necessary.
2476 */
2477 if (img_offset + length > rbd_dev->parent_overlap) {
2478 rbd_assert(img_offset < rbd_dev->parent_overlap);
2479 length = rbd_dev->parent_overlap - img_offset;
2480 }
2481
2482 /*
2483 * Allocate a page array big enough to receive the data read
2484 * from the parent.
2485 */
2486 page_count = (u32)calc_pages_for(0, length);
2487 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2488 if (IS_ERR(pages)) {
2489 result = PTR_ERR(pages);
2490 pages = NULL;
2491 goto out_err;
2492 }
2493
2494 result = -ENOMEM;
2495 parent_request = rbd_parent_request_create(obj_request,
2496 img_offset, length);
2497 if (!parent_request)
2498 goto out_err;
2499
2500 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2501 if (result)
2502 goto out_err;
2503 parent_request->copyup_pages = pages;
2504 parent_request->copyup_page_count = page_count;
2505
2506 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2507 result = rbd_img_request_submit(parent_request);
2508 if (!result)
2509 return 0;
2510
2511 parent_request->copyup_pages = NULL;
2512 parent_request->copyup_page_count = 0;
2513 parent_request->obj_request = NULL;
2514 rbd_obj_request_put(obj_request);
2515 out_err:
2516 if (pages)
2517 ceph_release_page_vector(pages, page_count);
2518 if (parent_request)
2519 rbd_img_request_put(parent_request);
2520 obj_request->result = result;
2521 obj_request->xferred = 0;
2522 obj_request_done_set(obj_request);
2523
2524 return result;
2525 }
2526
2527 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2528 {
2529 struct rbd_obj_request *orig_request;
2530 struct rbd_device *rbd_dev;
2531 int result;
2532
2533 rbd_assert(!obj_request_img_data_test(obj_request));
2534
2535 /*
2536 * All we need from the object request is the original
2537 * request and the result of the STAT op. Grab those, then
2538 * we're done with the request.
2539 */
2540 orig_request = obj_request->obj_request;
2541 obj_request->obj_request = NULL;
2542 rbd_assert(orig_request);
2543 rbd_assert(orig_request->img_request);
2544
2545 result = obj_request->result;
2546 obj_request->result = 0;
2547
2548 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2549 obj_request, orig_request, result,
2550 obj_request->xferred, obj_request->length);
2551 rbd_obj_request_put(obj_request);
2552
2553 /*
2554 * If the overlap has become 0 (most likely because the
2555 	 * image has been flattened) we need to re-submit the
2556 	 * original write request.
2557 */
2558 rbd_dev = orig_request->img_request->rbd_dev;
2559 if (!rbd_dev->parent_overlap) {
2560 struct ceph_osd_client *osdc;
2561
2562 rbd_obj_request_put(orig_request);
2563 osdc = &rbd_dev->rbd_client->client->osdc;
2564 result = rbd_obj_request_submit(osdc, orig_request);
2565 if (!result)
2566 return;
2567 }
2568
2569 /*
2570 * Our only purpose here is to determine whether the object
2571 * exists, and we don't want to treat the non-existence as
2572 * an error. If something else comes back, transfer the
2573 * error to the original request and complete it now.
2574 */
2575 if (!result) {
2576 obj_request_existence_set(orig_request, true);
2577 } else if (result == -ENOENT) {
2578 obj_request_existence_set(orig_request, false);
2579 } else if (result) {
2580 orig_request->result = result;
2581 goto out;
2582 }
2583
2584 /*
2585 * Resubmit the original request now that we have recorded
2586 * whether the target object exists.
2587 */
2588 orig_request->result = rbd_img_obj_request_submit(orig_request);
2589 out:
2590 if (orig_request->result)
2591 rbd_obj_request_complete(orig_request);
2592 rbd_obj_request_put(orig_request);
2593 }
2594
2595 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2596 {
2597 struct rbd_obj_request *stat_request;
2598 struct rbd_device *rbd_dev;
2599 struct ceph_osd_client *osdc;
2600 struct page **pages = NULL;
2601 u32 page_count;
2602 size_t size;
2603 int ret;
2604
2605 /*
2606 * The response data for a STAT call consists of:
2607 * le64 length;
2608 * struct {
2609 * le32 tv_sec;
2610 * le32 tv_nsec;
2611 * } mtime;
2612 */
2613 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2614 page_count = (u32)calc_pages_for(0, size);
2615 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2616 if (IS_ERR(pages))
2617 return PTR_ERR(pages);
2618
2619 ret = -ENOMEM;
2620 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2621 OBJ_REQUEST_PAGES);
2622 if (!stat_request)
2623 goto out;
2624
2625 rbd_obj_request_get(obj_request);
2626 stat_request->obj_request = obj_request;
2627 stat_request->pages = pages;
2628 stat_request->page_count = page_count;
2629
2630 rbd_assert(obj_request->img_request);
2631 rbd_dev = obj_request->img_request->rbd_dev;
2632 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2633 stat_request);
2634 if (!stat_request->osd_req)
2635 goto out;
2636 stat_request->callback = rbd_img_obj_exists_callback;
2637
2638 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2639 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2640 false, false);
2641 rbd_osd_req_format_read(stat_request);
2642
2643 osdc = &rbd_dev->rbd_client->client->osdc;
2644 ret = rbd_obj_request_submit(osdc, stat_request);
2645 out:
2646 if (ret)
2647 rbd_obj_request_put(obj_request);
2648
2649 return ret;
2650 }
2651
2652 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2653 {
2654 struct rbd_img_request *img_request;
2655 struct rbd_device *rbd_dev;
2656 bool known;
2657
2658 rbd_assert(obj_request_img_data_test(obj_request));
2659
2660 img_request = obj_request->img_request;
2661 rbd_assert(img_request);
2662 rbd_dev = img_request->rbd_dev;
2663
2664 /*
2665 * Only writes to layered images need special handling.
2666 * Reads and non-layered writes are simple object requests.
2667 * Layered writes that start beyond the end of the overlap
2668 * with the parent have no parent data, so they too are
2669 * simple object requests. Finally, if the target object is
2670 * known to already exist, its parent data has already been
2671 * copied, so a write to the object can also be handled as a
2672 * simple object request.
2673 */
2674 if (!img_request_write_test(img_request) ||
2675 !img_request_layered_test(img_request) ||
2676 rbd_dev->parent_overlap <= obj_request->img_offset ||
2677 ((known = obj_request_known_test(obj_request)) &&
2678 obj_request_exists_test(obj_request))) {
2679
2680 struct rbd_device *rbd_dev;
2681 struct ceph_osd_client *osdc;
2682
2683 rbd_dev = obj_request->img_request->rbd_dev;
2684 osdc = &rbd_dev->rbd_client->client->osdc;
2685
2686 return rbd_obj_request_submit(osdc, obj_request);
2687 }
2688
2689 /*
2690 * It's a layered write. The target object might exist but
2691 * we may not know that yet. If we know it doesn't exist,
2692 * start by reading the data for the full target object from
2693 * the parent so we can use it for a copyup to the target.
2694 */
2695 if (known)
2696 return rbd_img_obj_parent_read_full(obj_request);
2697
2698 /* We don't know whether the target exists. Go find out. */
2699
2700 return rbd_img_obj_exists_submit(obj_request);
2701 }
2702
2703 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2704 {
2705 struct rbd_obj_request *obj_request;
2706 struct rbd_obj_request *next_obj_request;
2707
2708 dout("%s: img %p\n", __func__, img_request);
2709 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2710 int ret;
2711
2712 ret = rbd_img_obj_request_submit(obj_request);
2713 if (ret)
2714 return ret;
2715 }
2716
2717 return 0;
2718 }
2719
2720 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2721 {
2722 struct rbd_obj_request *obj_request;
2723 struct rbd_device *rbd_dev;
2724 u64 obj_end;
2725 u64 img_xferred;
2726 int img_result;
2727
2728 rbd_assert(img_request_child_test(img_request));
2729
2730 /* First get what we need from the image request and release it */
2731
2732 obj_request = img_request->obj_request;
2733 img_xferred = img_request->xferred;
2734 img_result = img_request->result;
2735 rbd_img_request_put(img_request);
2736
2737 /*
2738 * If the overlap has become 0 (most likely because the
2739 * image has been flattened) we need to re-submit the
2740 * original request.
2741 */
2742 rbd_assert(obj_request);
2743 rbd_assert(obj_request->img_request);
2744 rbd_dev = obj_request->img_request->rbd_dev;
2745 if (!rbd_dev->parent_overlap) {
2746 struct ceph_osd_client *osdc;
2747
2748 osdc = &rbd_dev->rbd_client->client->osdc;
2749 img_result = rbd_obj_request_submit(osdc, obj_request);
2750 if (!img_result)
2751 return;
2752 }
2753
2754 obj_request->result = img_result;
2755 if (obj_request->result)
2756 goto out;
2757
2758 /*
2759 * We need to zero anything beyond the parent overlap
2760 * boundary. Since rbd_img_obj_request_read_callback()
2761 * will zero anything beyond the end of a short read, an
2762 * easy way to do this is to pretend the data from the
2763 * parent came up short--ending at the overlap boundary.
2764 */
2765 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2766 obj_end = obj_request->img_offset + obj_request->length;
2767 if (obj_end > rbd_dev->parent_overlap) {
2768 u64 xferred = 0;
2769
2770 if (obj_request->img_offset < rbd_dev->parent_overlap)
2771 xferred = rbd_dev->parent_overlap -
2772 obj_request->img_offset;
2773
2774 obj_request->xferred = min(img_xferred, xferred);
2775 } else {
2776 obj_request->xferred = img_xferred;
2777 }
2778 out:
2779 rbd_img_obj_request_read_callback(obj_request);
2780 rbd_obj_request_complete(obj_request);
2781 }
2782
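/*
 * The target object of a layered-image read does not exist (the read
 * came back -ENOENT).  Satisfy the request by reading the
 * corresponding range from the parent image instead.
 */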
2783 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2784 {
2785 struct rbd_img_request *img_request;
2786 int result;
2787
2788 rbd_assert(obj_request_img_data_test(obj_request));
2789 rbd_assert(obj_request->img_request != NULL);
2790 rbd_assert(obj_request->result == (s32) -ENOENT);
2791 rbd_assert(obj_request_type_valid(obj_request->type));
2792
2793 /* rbd_read_finish(obj_request, obj_request->length); */
2794 img_request = rbd_parent_request_create(obj_request,
2795 obj_request->img_offset,
2796 obj_request->length);
2797 result = -ENOMEM;
2798 if (!img_request)
2799 goto out_err;
2800
2801 if (obj_request->type == OBJ_REQUEST_BIO)
2802 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2803 obj_request->bio_list);
2804 else
2805 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2806 obj_request->pages);
2807 if (result)
2808 goto out_err;
2809
2810 img_request->callback = rbd_img_parent_read_callback;
2811 result = rbd_img_request_submit(img_request);
2812 if (result)
2813 goto out_err;
2814
2815 return;
2816 out_err:
2817 if (img_request)
2818 rbd_img_request_put(img_request);
2819 obj_request->result = result;
2820 obj_request->xferred = 0;
2821 obj_request_done_set(obj_request);
2822 }
2823
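/*
 * Acknowledge a notification on the header object: send a NOTIFY_ACK
 * op carrying the given notify id and wait for it to complete.
 */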
2824 static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
2825 {
2826 struct rbd_obj_request *obj_request;
2827 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2828 int ret;
2829
2830 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2831 OBJ_REQUEST_NODATA);
2832 if (!obj_request)
2833 return -ENOMEM;
2834
2835 ret = -ENOMEM;
2836 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2837 if (!obj_request->osd_req)
2838 goto out;
2839
2840 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2841 notify_id, 0, 0);
2842 rbd_osd_req_format_read(obj_request);
2843
2844 ret = rbd_obj_request_submit(osdc, obj_request);
2845 if (ret)
2846 goto out;
2847 ret = rbd_obj_request_wait(obj_request);
2848 out:
2849 rbd_obj_request_put(obj_request);
2850
2851 return ret;
2852 }
2853
2854 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2855 {
2856 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2857 int ret;
2858
2859 if (!rbd_dev)
2860 return;
2861
2862 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2863 rbd_dev->header_name, (unsigned long long)notify_id,
2864 (unsigned int)opcode);
2865 ret = rbd_dev_refresh(rbd_dev);
2866 if (ret)
2867 rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);
2868
2869 rbd_obj_notify_ack_sync(rbd_dev, notify_id);
2870 }
2871
2872 /*
2873 * Request sync osd watch/unwatch. The value of "start" determines
2874 * whether a watch request is being initiated or torn down.
2875 */
2876 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
2877 {
2878 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2879 struct rbd_obj_request *obj_request;
2880 int ret;
2881
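	/*
	 * When starting a watch there must not already be a watch event
	 * or request; when tearing one down, both must already exist.
	 */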
2882 rbd_assert(start ^ !!rbd_dev->watch_event);
2883 rbd_assert(start ^ !!rbd_dev->watch_request);
2884
2885 if (start) {
2886 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2887 &rbd_dev->watch_event);
2888 if (ret < 0)
2889 return ret;
2890 rbd_assert(rbd_dev->watch_event != NULL);
2891 }
2892
2893 ret = -ENOMEM;
2894 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2895 OBJ_REQUEST_NODATA);
2896 if (!obj_request)
2897 goto out_cancel;
2898
2899 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2900 if (!obj_request->osd_req)
2901 goto out_cancel;
2902
2903 if (start)
2904 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2905 else
2906 ceph_osdc_unregister_linger_request(osdc,
2907 rbd_dev->watch_request->osd_req);
2908
2909 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2910 rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
2911 rbd_osd_req_format_write(obj_request);
2912
2913 ret = rbd_obj_request_submit(osdc, obj_request);
2914 if (ret)
2915 goto out_cancel;
2916 ret = rbd_obj_request_wait(obj_request);
2917 if (ret)
2918 goto out_cancel;
2919 ret = obj_request->result;
2920 if (ret)
2921 goto out_cancel;
2922
2923 /*
2924 * A watch request is set to linger, so the underlying osd
2925 * request won't go away until we unregister it. We retain
2926 * a pointer to the object request during that time (in
2927 * rbd_dev->watch_request), so we'll keep a reference to
2928 * it. We'll drop that reference (below) after we've
2929 * unregistered it.
2930 */
2931 if (start) {
2932 rbd_dev->watch_request = obj_request;
2933
2934 return 0;
2935 }
2936
2937 /* We have successfully torn down the watch request */
2938
2939 rbd_obj_request_put(rbd_dev->watch_request);
2940 rbd_dev->watch_request = NULL;
2941 out_cancel:
2942 /* Cancel the event if we're tearing down, or on error */
2943 ceph_osdc_cancel_event(rbd_dev->watch_event);
2944 rbd_dev->watch_event = NULL;
2945 if (obj_request)
2946 rbd_obj_request_put(obj_request);
2947
2948 return ret;
2949 }
2950
2951 /*
2952 * Synchronous osd object method call. Returns the number of bytes
2953 * returned in the outbound buffer, or a negative error code.
2954 */
2955 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2956 const char *object_name,
2957 const char *class_name,
2958 const char *method_name,
2959 const void *outbound,
2960 size_t outbound_size,
2961 void *inbound,
2962 size_t inbound_size)
2963 {
2964 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2965 struct rbd_obj_request *obj_request;
2966 struct page **pages;
2967 u32 page_count;
2968 int ret;
2969
2970 /*
2971 * Method calls are ultimately read operations. The result
2972 	 * should be placed into the inbound buffer provided. They
2973 * also supply outbound data--parameters for the object
2974 * method. Currently if this is present it will be a
2975 * snapshot id.
2976 */
2977 page_count = (u32)calc_pages_for(0, inbound_size);
2978 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2979 if (IS_ERR(pages))
2980 return PTR_ERR(pages);
2981
2982 ret = -ENOMEM;
2983 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
2984 OBJ_REQUEST_PAGES);
2985 if (!obj_request)
2986 goto out;
2987
2988 obj_request->pages = pages;
2989 obj_request->page_count = page_count;
2990
2991 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2992 if (!obj_request->osd_req)
2993 goto out;
2994
2995 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
2996 class_name, method_name);
2997 if (outbound_size) {
2998 struct ceph_pagelist *pagelist;
2999
3000 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3001 if (!pagelist)
3002 goto out;
3003
3004 ceph_pagelist_init(pagelist);
3005 ceph_pagelist_append(pagelist, outbound, outbound_size);
3006 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3007 pagelist);
3008 }
3009 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3010 obj_request->pages, inbound_size,
3011 0, false, false);
3012 rbd_osd_req_format_read(obj_request);
3013
3014 ret = rbd_obj_request_submit(osdc, obj_request);
3015 if (ret)
3016 goto out;
3017 ret = rbd_obj_request_wait(obj_request);
3018 if (ret)
3019 goto out;
3020
3021 ret = obj_request->result;
3022 if (ret < 0)
3023 goto out;
3024
3025 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3026 ret = (int)obj_request->xferred;
3027 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3028 out:
3029 if (obj_request)
3030 rbd_obj_request_put(obj_request);
3031 else
3032 ceph_release_page_vector(pages, page_count);
3033
3034 return ret;
3035 }
3036
3037 static void rbd_request_fn(struct request_queue *q)
3038 __releases(q->queue_lock) __acquires(q->queue_lock)
3039 {
3040 struct rbd_device *rbd_dev = q->queuedata;
3041 bool read_only = rbd_dev->mapping.read_only;
3042 struct request *rq;
3043 int result;
3044
3045 while ((rq = blk_fetch_request(q))) {
3046 bool write_request = rq_data_dir(rq) == WRITE;
3047 struct rbd_img_request *img_request;
3048 u64 offset;
3049 u64 length;
3050
3051 /* Ignore any non-FS requests that filter through. */
3052
3053 if (rq->cmd_type != REQ_TYPE_FS) {
3054 dout("%s: non-fs request type %d\n", __func__,
3055 (int) rq->cmd_type);
3056 __blk_end_request_all(rq, 0);
3057 continue;
3058 }
3059
3060 /* Ignore/skip any zero-length requests */
3061
3062 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
3063 length = (u64) blk_rq_bytes(rq);
3064
3065 if (!length) {
3066 dout("%s: zero-length request\n", __func__);
3067 __blk_end_request_all(rq, 0);
3068 continue;
3069 }
3070
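		/*
		 * blk_fetch_request() was called with the queue lock
		 * held (see the __releases/__acquires annotations
		 * above); drop it while the image request is built and
		 * submitted, and retake it before ending the request.
		 */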
3071 spin_unlock_irq(q->queue_lock);
3072
3073 /* Disallow writes to a read-only device */
3074
3075 if (write_request) {
3076 result = -EROFS;
3077 if (read_only)
3078 goto end_request;
3079 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3080 }
3081
3082 /*
3083 * Quit early if the mapped snapshot no longer
3084 * exists. It's still possible the snapshot will
3085 * have disappeared by the time our request arrives
3086 * at the osd, but there's no sense in sending it if
3087 * we already know.
3088 */
3089 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3090 dout("request for non-existent snapshot");
3091 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3092 result = -ENXIO;
3093 goto end_request;
3094 }
3095
3096 result = -EINVAL;
3097 if (offset && length > U64_MAX - offset + 1) {
3098 rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
3099 offset, length);
3100 goto end_request; /* Shouldn't happen */
3101 }
3102
3103 result = -EIO;
3104 if (offset + length > rbd_dev->mapping.size) {
3105 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
3106 offset, length, rbd_dev->mapping.size);
3107 goto end_request;
3108 }
3109
3110 result = -ENOMEM;
3111 img_request = rbd_img_request_create(rbd_dev, offset, length,
3112 write_request);
3113 if (!img_request)
3114 goto end_request;
3115
3116 img_request->rq = rq;
3117
3118 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3119 rq->bio);
3120 if (!result)
3121 result = rbd_img_request_submit(img_request);
3122 if (result)
3123 rbd_img_request_put(img_request);
3124 end_request:
3125 spin_lock_irq(q->queue_lock);
3126 if (result < 0) {
3127 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
3128 write_request ? "write" : "read",
3129 length, offset, result);
3130
3131 __blk_end_request_all(rq, result);
3132 }
3133 }
3134 }
3135
3136 /*
3137  * A queue callback. Makes sure that we don't create a bio that spans across
3138  * multiple osd objects. One exception would be single-page bios,
3139  * which we handle later in bio_chain_clone_range()
3140 */
3141 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3142 struct bio_vec *bvec)
3143 {
3144 struct rbd_device *rbd_dev = q->queuedata;
3145 sector_t sector_offset;
3146 sector_t sectors_per_obj;
3147 sector_t obj_sector_offset;
3148 int ret;
3149
3150 /*
3151 	 * Find how far into its rbd object the bio's start sector
3152 	 * falls, after converting the partition-relative sector to one
3153 	 * relative to the enclosing device.
3154 */
3155 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3156 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3157 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3158
3159 /*
3160 * Compute the number of bytes from that offset to the end
3161 * of the object. Account for what's already used by the bio.
3162 */
3163 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3164 if (ret > bmd->bi_size)
3165 ret -= bmd->bi_size;
3166 else
3167 ret = 0;
3168
3169 /*
3170 * Don't send back more than was asked for. And if the bio
3171 * was empty, let the whole thing through because: "Note
3172 * that a block device *must* allow a single page to be
3173 * added to an empty bio."
3174 */
3175 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3176 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3177 ret = (int) bvec->bv_len;
3178
3179 return ret;
3180 }
3181
3182 static void rbd_free_disk(struct rbd_device *rbd_dev)
3183 {
3184 struct gendisk *disk = rbd_dev->disk;
3185
3186 if (!disk)
3187 return;
3188
3189 rbd_dev->disk = NULL;
3190 if (disk->flags & GENHD_FL_UP) {
3191 del_gendisk(disk);
3192 if (disk->queue)
3193 blk_cleanup_queue(disk->queue);
3194 }
3195 put_disk(disk);
3196 }
3197
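/*
 * Synchronously read a byte range from the named object into the
 * supplied buffer.  Returns the number of bytes read, or a negative
 * error code.
 */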
3198 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3199 const char *object_name,
3200 u64 offset, u64 length, void *buf)
3201
3202 {
3203 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3204 struct rbd_obj_request *obj_request;
3205 struct page **pages = NULL;
3206 u32 page_count;
3207 size_t size;
3208 int ret;
3209
3210 page_count = (u32) calc_pages_for(offset, length);
3211 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3212 	if (IS_ERR(pages))
3213 		return PTR_ERR(pages);
3214
3215 ret = -ENOMEM;
3216 obj_request = rbd_obj_request_create(object_name, offset, length,
3217 OBJ_REQUEST_PAGES);
3218 if (!obj_request)
3219 goto out;
3220
3221 obj_request->pages = pages;
3222 obj_request->page_count = page_count;
3223
3224 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
3225 if (!obj_request->osd_req)
3226 goto out;
3227
3228 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3229 offset, length, 0, 0);
3230 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3231 obj_request->pages,
3232 obj_request->length,
3233 obj_request->offset & ~PAGE_MASK,
3234 false, false);
3235 rbd_osd_req_format_read(obj_request);
3236
3237 ret = rbd_obj_request_submit(osdc, obj_request);
3238 if (ret)
3239 goto out;
3240 ret = rbd_obj_request_wait(obj_request);
3241 if (ret)
3242 goto out;
3243
3244 ret = obj_request->result;
3245 if (ret < 0)
3246 goto out;
3247
3248 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3249 size = (size_t) obj_request->xferred;
3250 ceph_copy_from_page_vector(pages, buf, 0, size);
3251 rbd_assert(size <= (size_t)INT_MAX);
3252 ret = (int)size;
3253 out:
3254 if (obj_request)
3255 rbd_obj_request_put(obj_request);
3256 else
3257 ceph_release_page_vector(pages, page_count);
3258
3259 return ret;
3260 }
3261
3262 /*
3263 * Read the complete header for the given rbd device. On successful
3264 * return, the rbd_dev->header field will contain up-to-date
3265 * information about the image.
3266 */
3267 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3268 {
3269 struct rbd_image_header_ondisk *ondisk = NULL;
3270 u32 snap_count = 0;
3271 u64 names_size = 0;
3272 u32 want_count;
3273 int ret;
3274
3275 /*
3276 * The complete header will include an array of its 64-bit
3277 * snapshot ids, followed by the names of those snapshots as
3278 * a contiguous block of NUL-terminated strings. Note that
3279 * the number of snapshots could change by the time we read
3280 * it in, in which case we re-read it.
3281 */
3282 do {
3283 size_t size;
3284
3285 kfree(ondisk);
3286
3287 size = sizeof (*ondisk);
3288 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3289 size += names_size;
3290 ondisk = kmalloc(size, GFP_KERNEL);
3291 if (!ondisk)
3292 return -ENOMEM;
3293
3294 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3295 0, size, ondisk);
3296 if (ret < 0)
3297 goto out;
3298 if ((size_t)ret < size) {
3299 ret = -ENXIO;
3300 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3301 size, ret);
3302 goto out;
3303 }
3304 if (!rbd_dev_ondisk_valid(ondisk)) {
3305 ret = -ENXIO;
3306 rbd_warn(rbd_dev, "invalid header");
3307 goto out;
3308 }
3309
3310 names_size = le64_to_cpu(ondisk->snap_names_len);
3311 want_count = snap_count;
3312 snap_count = le32_to_cpu(ondisk->snap_count);
3313 } while (snap_count != want_count);
3314
3315 ret = rbd_header_from_disk(rbd_dev, ondisk);
3316 out:
3317 kfree(ondisk);
3318
3319 return ret;
3320 }
3321
3322 /*
3323 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3324 * has disappeared from the (just updated) snapshot context.
3325 */
3326 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3327 {
3328 u64 snap_id;
3329
3330 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3331 return;
3332
3333 snap_id = rbd_dev->spec->snap_id;
3334 if (snap_id == CEPH_NOSNAP)
3335 return;
3336
3337 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3338 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3339 }
3340
3341 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3342 {
3343 sector_t size;
3344 bool removing;
3345
3346 /*
3347 * Don't hold the lock while doing disk operations,
3348 * or lock ordering will conflict with the bdev mutex via:
3349 * rbd_add() -> blkdev_get() -> rbd_open()
3350 */
3351 spin_lock_irq(&rbd_dev->lock);
3352 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
3353 spin_unlock_irq(&rbd_dev->lock);
3354 /*
3355 * If the device is being removed, rbd_dev->disk has
3356 * been destroyed, so don't try to update its size
3357 */
3358 if (!removing) {
3359 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3360 dout("setting size to %llu sectors", (unsigned long long)size);
3361 set_capacity(rbd_dev->disk, size);
3362 revalidate_disk(rbd_dev->disk);
3363 }
3364 }
3365
3366 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3367 {
3368 u64 mapping_size;
3369 int ret;
3370
3371 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3372 mapping_size = rbd_dev->mapping.size;
3373 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3374 if (rbd_dev->image_format == 1)
3375 ret = rbd_dev_v1_header_info(rbd_dev);
3376 else
3377 ret = rbd_dev_v2_header_info(rbd_dev);
3378
3379 /* If it's a mapped snapshot, validate its EXISTS flag */
3380
3381 rbd_exists_validate(rbd_dev);
3382 mutex_unlock(&ctl_mutex);
3383 if (mapping_size != rbd_dev->mapping.size) {
3384 rbd_dev_update_size(rbd_dev);
3385 }
3386
3387 return ret;
3388 }
3389
3390 static int rbd_init_disk(struct rbd_device *rbd_dev)
3391 {
3392 struct gendisk *disk;
3393 struct request_queue *q;
3394 u64 segment_size;
3395
3396 /* create gendisk info */
3397 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3398 if (!disk)
3399 return -ENOMEM;
3400
3401 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3402 rbd_dev->dev_id);
3403 disk->major = rbd_dev->major;
3404 disk->first_minor = 0;
3405 disk->fops = &rbd_bd_ops;
3406 disk->private_data = rbd_dev;
3407
3408 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3409 if (!q)
3410 goto out_disk;
3411
3412 /* We use the default size, but let's be explicit about it. */
3413 blk_queue_physical_block_size(q, SECTOR_SIZE);
3414
3415 /* set io sizes to object size */
3416 segment_size = rbd_obj_bytes(&rbd_dev->header);
3417 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3418 blk_queue_max_segment_size(q, segment_size);
3419 blk_queue_io_min(q, segment_size);
3420 blk_queue_io_opt(q, segment_size);
3421
3422 blk_queue_merge_bvec(q, rbd_merge_bvec);
3423 disk->queue = q;
3424
3425 q->queuedata = rbd_dev;
3426
3427 rbd_dev->disk = disk;
3428
3429 return 0;
3430 out_disk:
3431 put_disk(disk);
3432
3433 return -ENOMEM;
3434 }
3435
3436 /*
3437 sysfs
3438 */
3439
3440 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3441 {
3442 return container_of(dev, struct rbd_device, dev);
3443 }
3444
3445 static ssize_t rbd_size_show(struct device *dev,
3446 struct device_attribute *attr, char *buf)
3447 {
3448 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3449
3450 return sprintf(buf, "%llu\n",
3451 (unsigned long long)rbd_dev->mapping.size);
3452 }
3453
3454 /*
3455 * Note this shows the features for whatever's mapped, which is not
3456 * necessarily the base image.
3457 */
3458 static ssize_t rbd_features_show(struct device *dev,
3459 struct device_attribute *attr, char *buf)
3460 {
3461 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3462
3463 return sprintf(buf, "0x%016llx\n",
3464 (unsigned long long)rbd_dev->mapping.features);
3465 }
3466
3467 static ssize_t rbd_major_show(struct device *dev,
3468 struct device_attribute *attr, char *buf)
3469 {
3470 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3471
3472 if (rbd_dev->major)
3473 return sprintf(buf, "%d\n", rbd_dev->major);
3474
3475 return sprintf(buf, "(none)\n");
3476
3477 }
3478
3479 static ssize_t rbd_client_id_show(struct device *dev,
3480 struct device_attribute *attr, char *buf)
3481 {
3482 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3483
3484 return sprintf(buf, "client%lld\n",
3485 ceph_client_id(rbd_dev->rbd_client->client));
3486 }
3487
3488 static ssize_t rbd_pool_show(struct device *dev,
3489 struct device_attribute *attr, char *buf)
3490 {
3491 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3492
3493 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3494 }
3495
3496 static ssize_t rbd_pool_id_show(struct device *dev,
3497 struct device_attribute *attr, char *buf)
3498 {
3499 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3500
3501 return sprintf(buf, "%llu\n",
3502 (unsigned long long) rbd_dev->spec->pool_id);
3503 }
3504
3505 static ssize_t rbd_name_show(struct device *dev,
3506 struct device_attribute *attr, char *buf)
3507 {
3508 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3509
3510 if (rbd_dev->spec->image_name)
3511 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3512
3513 return sprintf(buf, "(unknown)\n");
3514 }
3515
3516 static ssize_t rbd_image_id_show(struct device *dev,
3517 struct device_attribute *attr, char *buf)
3518 {
3519 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3520
3521 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3522 }
3523
3524 /*
3525 * Shows the name of the currently-mapped snapshot (or
3526 * RBD_SNAP_HEAD_NAME for the base image).
3527 */
3528 static ssize_t rbd_snap_show(struct device *dev,
3529 struct device_attribute *attr,
3530 char *buf)
3531 {
3532 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3533
3534 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3535 }
3536
3537 /*
3538 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3539 * for the parent image. If there is no parent, simply shows
3540 * "(no parent image)".
3541 */
3542 static ssize_t rbd_parent_show(struct device *dev,
3543 struct device_attribute *attr,
3544 char *buf)
3545 {
3546 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3547 struct rbd_spec *spec = rbd_dev->parent_spec;
3548 int count;
3549 char *bufp = buf;
3550
3551 if (!spec)
3552 return sprintf(buf, "(no parent image)\n");
3553
3554 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3555 (unsigned long long) spec->pool_id, spec->pool_name);
3556 if (count < 0)
3557 return count;
3558 bufp += count;
3559
3560 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3561 spec->image_name ? spec->image_name : "(unknown)");
3562 if (count < 0)
3563 return count;
3564 bufp += count;
3565
3566 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3567 (unsigned long long) spec->snap_id, spec->snap_name);
3568 if (count < 0)
3569 return count;
3570 bufp += count;
3571
3572 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3573 if (count < 0)
3574 return count;
3575 bufp += count;
3576
3577 return (ssize_t) (bufp - buf);
3578 }
3579
3580 static ssize_t rbd_image_refresh(struct device *dev,
3581 struct device_attribute *attr,
3582 const char *buf,
3583 size_t size)
3584 {
3585 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3586 int ret;
3587
3588 ret = rbd_dev_refresh(rbd_dev);
3589 if (ret)
3590 rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
3591
3592 return ret < 0 ? ret : size;
3593 }
3594
3595 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3596 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3597 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3598 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3599 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3600 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3601 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3602 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3603 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3604 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3605 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3606
3607 static struct attribute *rbd_attrs[] = {
3608 &dev_attr_size.attr,
3609 &dev_attr_features.attr,
3610 &dev_attr_major.attr,
3611 &dev_attr_client_id.attr,
3612 &dev_attr_pool.attr,
3613 &dev_attr_pool_id.attr,
3614 &dev_attr_name.attr,
3615 &dev_attr_image_id.attr,
3616 &dev_attr_current_snap.attr,
3617 &dev_attr_parent.attr,
3618 &dev_attr_refresh.attr,
3619 NULL
3620 };
3621
3622 static struct attribute_group rbd_attr_group = {
3623 .attrs = rbd_attrs,
3624 };
3625
3626 static const struct attribute_group *rbd_attr_groups[] = {
3627 &rbd_attr_group,
3628 NULL
3629 };
3630
3631 static void rbd_sysfs_dev_release(struct device *dev)
3632 {
3633 }
3634
3635 static struct device_type rbd_device_type = {
3636 .name = "rbd",
3637 .groups = rbd_attr_groups,
3638 .release = rbd_sysfs_dev_release,
3639 };
3640
3641 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3642 {
3643 kref_get(&spec->kref);
3644
3645 return spec;
3646 }
3647
3648 static void rbd_spec_free(struct kref *kref);
3649 static void rbd_spec_put(struct rbd_spec *spec)
3650 {
3651 if (spec)
3652 kref_put(&spec->kref, rbd_spec_free);
3653 }
3654
3655 static struct rbd_spec *rbd_spec_alloc(void)
3656 {
3657 struct rbd_spec *spec;
3658
3659 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3660 if (!spec)
3661 return NULL;
3662 kref_init(&spec->kref);
3663
3664 return spec;
3665 }
3666
3667 static void rbd_spec_free(struct kref *kref)
3668 {
3669 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3670
3671 kfree(spec->pool_name);
3672 kfree(spec->image_id);
3673 kfree(spec->image_name);
3674 kfree(spec->snap_name);
3675 kfree(spec);
3676 }
3677
3678 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3679 struct rbd_spec *spec)
3680 {
3681 struct rbd_device *rbd_dev;
3682
3683 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3684 if (!rbd_dev)
3685 return NULL;
3686
3687 spin_lock_init(&rbd_dev->lock);
3688 rbd_dev->flags = 0;
3689 atomic_set(&rbd_dev->parent_ref, 0);
3690 INIT_LIST_HEAD(&rbd_dev->node);
3691 init_rwsem(&rbd_dev->header_rwsem);
3692
3693 rbd_dev->spec = spec;
3694 rbd_dev->rbd_client = rbdc;
3695
3696 /* Initialize the layout used for all rbd requests */
3697
3698 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3699 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3700 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3701 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3702
3703 return rbd_dev;
3704 }
3705
3706 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3707 {
3708 rbd_put_client(rbd_dev->rbd_client);
3709 rbd_spec_put(rbd_dev->spec);
3710 kfree(rbd_dev);
3711 }
3712
3713 /*
3714 * Get the size and object order for an image snapshot, or if
3715 * snap_id is CEPH_NOSNAP, gets this information for the base
3716 * image.
3717 */
3718 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3719 u8 *order, u64 *snap_size)
3720 {
3721 __le64 snapid = cpu_to_le64(snap_id);
3722 int ret;
3723 struct {
3724 u8 order;
3725 __le64 size;
3726 } __attribute__ ((packed)) size_buf = { 0 };
3727
3728 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3729 "rbd", "get_size",
3730 &snapid, sizeof (snapid),
3731 &size_buf, sizeof (size_buf));
3732 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3733 if (ret < 0)
3734 return ret;
3735 if (ret < sizeof (size_buf))
3736 return -ERANGE;
3737
3738 if (order) {
3739 *order = size_buf.order;
3740 dout(" order %u", (unsigned int)*order);
3741 }
3742 *snap_size = le64_to_cpu(size_buf.size);
3743
3744 dout(" snap_id 0x%016llx snap_size = %llu\n",
3745 (unsigned long long)snap_id,
3746 (unsigned long long)*snap_size);
3747
3748 return 0;
3749 }
3750
3751 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3752 {
3753 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3754 &rbd_dev->header.obj_order,
3755 &rbd_dev->header.image_size);
3756 }
3757
3758 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3759 {
3760 void *reply_buf;
3761 int ret;
3762 void *p;
3763
3764 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3765 if (!reply_buf)
3766 return -ENOMEM;
3767
3768 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3769 "rbd", "get_object_prefix", NULL, 0,
3770 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3771 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3772 if (ret < 0)
3773 goto out;
3774
3775 p = reply_buf;
3776 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3777 p + ret, NULL, GFP_NOIO);
3778 ret = 0;
3779
3780 if (IS_ERR(rbd_dev->header.object_prefix)) {
3781 ret = PTR_ERR(rbd_dev->header.object_prefix);
3782 rbd_dev->header.object_prefix = NULL;
3783 } else {
3784 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
3785 }
3786 out:
3787 kfree(reply_buf);
3788
3789 return ret;
3790 }
3791
3792 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3793 u64 *snap_features)
3794 {
3795 __le64 snapid = cpu_to_le64(snap_id);
3796 struct {
3797 __le64 features;
3798 __le64 incompat;
3799 } __attribute__ ((packed)) features_buf = { 0 };
3800 u64 incompat;
3801 int ret;
3802
3803 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3804 "rbd", "get_features",
3805 &snapid, sizeof (snapid),
3806 &features_buf, sizeof (features_buf));
3807 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3808 if (ret < 0)
3809 return ret;
3810 if (ret < sizeof (features_buf))
3811 return -ERANGE;
3812
3813 incompat = le64_to_cpu(features_buf.incompat);
3814 if (incompat & ~RBD_FEATURES_SUPPORTED)
3815 return -ENXIO;
3816
3817 *snap_features = le64_to_cpu(features_buf.features);
3818
3819 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3820 (unsigned long long)snap_id,
3821 (unsigned long long)*snap_features,
3822 (unsigned long long)le64_to_cpu(features_buf.incompat));
3823
3824 return 0;
3825 }
3826
3827 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3828 {
3829 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3830 &rbd_dev->header.features);
3831 }
3832
3833 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3834 {
3835 struct rbd_spec *parent_spec;
3836 size_t size;
3837 void *reply_buf = NULL;
3838 __le64 snapid;
3839 void *p;
3840 void *end;
3841 u64 pool_id;
3842 char *image_id;
3843 u64 overlap;
3844 int ret;
3845
3846 parent_spec = rbd_spec_alloc();
3847 if (!parent_spec)
3848 return -ENOMEM;
3849
3850 size = sizeof (__le64) + /* pool_id */
3851 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
3852 sizeof (__le64) + /* snap_id */
3853 sizeof (__le64); /* overlap */
3854 reply_buf = kmalloc(size, GFP_KERNEL);
3855 if (!reply_buf) {
3856 ret = -ENOMEM;
3857 goto out_err;
3858 }
3859
3860 snapid = cpu_to_le64(CEPH_NOSNAP);
3861 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3862 "rbd", "get_parent",
3863 &snapid, sizeof (snapid),
3864 reply_buf, size);
3865 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3866 if (ret < 0)
3867 goto out_err;
3868
3869 p = reply_buf;
3870 end = reply_buf + ret;
3871 ret = -ERANGE;
3872 ceph_decode_64_safe(&p, end, pool_id, out_err);
3873 if (pool_id == CEPH_NOPOOL) {
3874 /*
3875 * Either the parent never existed, or we have
3876 * record of it but the image got flattened so it no
3877 * longer has a parent. When the parent of a
3878 * layered image disappears we immediately set the
3879 * overlap to 0. The effect of this is that all new
3880 * requests will be treated as if the image had no
3881 * parent.
3882 */
3883 if (rbd_dev->parent_overlap) {
3884 rbd_dev->parent_overlap = 0;
3885 smp_mb();
3886 rbd_dev_parent_put(rbd_dev);
3887 pr_info("%s: clone image has been flattened\n",
3888 rbd_dev->disk->disk_name);
3889 }
3890
3891 goto out; /* No parent? No problem. */
3892 }
3893
3894 /* The ceph file layout needs to fit pool id in 32 bits */
3895
3896 ret = -EIO;
3897 if (pool_id > (u64)U32_MAX) {
3898 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3899 (unsigned long long)pool_id, U32_MAX);
3900 goto out_err;
3901 }
3902 parent_spec->pool_id = pool_id;
3903
3904 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3905 if (IS_ERR(image_id)) {
3906 ret = PTR_ERR(image_id);
3907 goto out_err;
3908 }
3909 parent_spec->image_id = image_id;
3910 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
3911 ceph_decode_64_safe(&p, end, overlap, out_err);
3912
3913 if (overlap) {
3914 rbd_spec_put(rbd_dev->parent_spec);
3915 rbd_dev->parent_spec = parent_spec;
3916 parent_spec = NULL; /* rbd_dev now owns this */
3917 rbd_dev->parent_overlap = overlap;
3918 } else {
3919 rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n");
3920 }
3921 out:
3922 ret = 0;
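	/* fall through: out_err is also the common cleanup path on success */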
3923 out_err:
3924 kfree(reply_buf);
3925 rbd_spec_put(parent_spec);
3926
3927 return ret;
3928 }
3929
3930 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
3931 {
3932 struct {
3933 __le64 stripe_unit;
3934 __le64 stripe_count;
3935 } __attribute__ ((packed)) striping_info_buf = { 0 };
3936 size_t size = sizeof (striping_info_buf);
3937 void *p;
3938 u64 obj_size;
3939 u64 stripe_unit;
3940 u64 stripe_count;
3941 int ret;
3942
3943 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3944 "rbd", "get_stripe_unit_count", NULL, 0,
3945 (char *)&striping_info_buf, size);
3946 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3947 if (ret < 0)
3948 return ret;
3949 if (ret < size)
3950 return -ERANGE;
3951
3952 /*
3953 * We don't actually support the "fancy striping" feature
3954 * (STRIPINGV2) yet, but if the striping sizes are the
3955 * defaults the behavior is the same as before. So find
3956 * out, and only fail if the image has non-default values.
3957 */
3958 ret = -EINVAL;
3959 obj_size = (u64)1 << rbd_dev->header.obj_order;
3960 p = &striping_info_buf;
3961 stripe_unit = ceph_decode_64(&p);
3962 if (stripe_unit != obj_size) {
3963 rbd_warn(rbd_dev, "unsupported stripe unit "
3964 "(got %llu want %llu)",
3965 stripe_unit, obj_size);
3966 return -EINVAL;
3967 }
3968 stripe_count = ceph_decode_64(&p);
3969 if (stripe_count != 1) {
3970 rbd_warn(rbd_dev, "unsupported stripe count "
3971 "(got %llu want 1)", stripe_count);
3972 return -EINVAL;
3973 }
3974 rbd_dev->header.stripe_unit = stripe_unit;
3975 rbd_dev->header.stripe_count = stripe_count;
3976
3977 return 0;
3978 }
3979
3980 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
3981 {
3982 size_t image_id_size;
3983 char *image_id;
3984 void *p;
3985 void *end;
3986 size_t size;
3987 void *reply_buf = NULL;
3988 size_t len = 0;
3989 char *image_name = NULL;
3990 int ret;
3991
3992 rbd_assert(!rbd_dev->spec->image_name);
3993
3994 len = strlen(rbd_dev->spec->image_id);
3995 image_id_size = sizeof (__le32) + len;
3996 image_id = kmalloc(image_id_size, GFP_KERNEL);
3997 if (!image_id)
3998 return NULL;
3999
4000 p = image_id;
4001 end = image_id + image_id_size;
4002 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4003
4004 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4005 reply_buf = kmalloc(size, GFP_KERNEL);
4006 if (!reply_buf)
4007 goto out;
4008
4009 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4010 "rbd", "dir_get_name",
4011 image_id, image_id_size,
4012 reply_buf, size);
4013 if (ret < 0)
4014 goto out;
4015 p = reply_buf;
4016 end = reply_buf + ret;
4017
4018 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4019 if (IS_ERR(image_name))
4020 image_name = NULL;
4021 else
4022 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4023 out:
4024 kfree(reply_buf);
4025 kfree(image_id);
4026
4027 return image_name;
4028 }
4029
4030 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4031 {
4032 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4033 const char *snap_name;
4034 u32 which = 0;
4035
4036 /* Skip over names until we find the one we are looking for */
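	/* v1 snapshot names are packed back to back, NUL-separated, in the same order as snapc->snaps[] */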
4037
4038 snap_name = rbd_dev->header.snap_names;
4039 while (which < snapc->num_snaps) {
4040 if (!strcmp(name, snap_name))
4041 return snapc->snaps[which];
4042 snap_name += strlen(snap_name) + 1;
4043 which++;
4044 }
4045 return CEPH_NOSNAP;
4046 }
4047
4048 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4049 {
4050 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4051 u32 which;
4052 bool found = false;
4053 u64 snap_id;
4054
4055 for (which = 0; !found && which < snapc->num_snaps; which++) {
4056 const char *snap_name;
4057
4058 snap_id = snapc->snaps[which];
4059 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4060 if (IS_ERR(snap_name)) {
4061 /* ignore no-longer existing snapshots */
4062 if (PTR_ERR(snap_name) == -ENOENT)
4063 continue;
4064 else
4065 break;
4066 }
4067 found = !strcmp(name, snap_name);
4068 kfree(snap_name);
4069 }
4070 return found ? snap_id : CEPH_NOSNAP;
4071 }
4072
4073 /*
4074 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4075 * no snapshot by that name is found, or if an error occurs.
4076 */
4077 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4078 {
4079 if (rbd_dev->image_format == 1)
4080 return rbd_v1_snap_id_by_name(rbd_dev, name);
4081
4082 return rbd_v2_snap_id_by_name(rbd_dev, name);
4083 }
4084
4085 /*
4086 * When an rbd image has a parent image, it is identified by the
4087 * pool, image, and snapshot ids (not names). This function fills
4088 * in the names for those ids. (It's OK if we can't figure out the
4089 * name for an image id, but the pool and snapshot ids should always
4090 * exist and have names.) All names in an rbd spec are dynamically
4091 * allocated.
4092 *
4093 * When an image being mapped (not a parent) is probed, we have the
4094 * pool name and pool id, image name and image id, and the snapshot
4095 * name. The only thing we're missing is the snapshot id.
4096 */
4097 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4098 {
4099 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4100 struct rbd_spec *spec = rbd_dev->spec;
4101 const char *pool_name;
4102 const char *image_name;
4103 const char *snap_name;
4104 int ret;
4105
4106 /*
4107 * An image being mapped will have the pool name (etc.), but
4108 * we need to look up the snapshot id.
4109 */
4110 if (spec->pool_name) {
4111 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4112 u64 snap_id;
4113
4114 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4115 if (snap_id == CEPH_NOSNAP)
4116 return -ENOENT;
4117 spec->snap_id = snap_id;
4118 } else {
4119 spec->snap_id = CEPH_NOSNAP;
4120 }
4121
4122 return 0;
4123 }
4124
4125 /* Get the pool name; we have to make our own copy of this */
4126
4127 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4128 if (!pool_name) {
4129 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4130 return -EIO;
4131 }
4132 pool_name = kstrdup(pool_name, GFP_KERNEL);
4133 if (!pool_name)
4134 return -ENOMEM;
4135
4136 /* Fetch the image name; tolerate failure here */
4137
4138 image_name = rbd_dev_image_name(rbd_dev);
4139 if (!image_name)
4140 rbd_warn(rbd_dev, "unable to get image name");
4141
4142 /* Look up the snapshot name, and make a copy */
4143
4144 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4145 if (IS_ERR(snap_name)) {
4146 ret = PTR_ERR(snap_name);
4147 goto out_err;
4148 }
4149
4150 spec->pool_name = pool_name;
4151 spec->image_name = image_name;
4152 spec->snap_name = snap_name;
4153
4154 return 0;
4155 out_err:
4156 kfree(image_name);
4157 kfree(pool_name);
4158
4159 return ret;
4160 }
4161
4162 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4163 {
4164 size_t size;
4165 int ret;
4166 void *reply_buf;
4167 void *p;
4168 void *end;
4169 u64 seq;
4170 u32 snap_count;
4171 struct ceph_snap_context *snapc;
4172 u32 i;
4173
4174 /*
4175 * We'll need room for the seq value (maximum snapshot id),
4176 * snapshot count, and array of that many snapshot ids.
4177 * For now we have a fixed upper limit on the number we're
4178 * prepared to receive.
4179 */
4180 size = sizeof (__le64) + sizeof (__le32) +
4181 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4182 reply_buf = kzalloc(size, GFP_KERNEL);
4183 if (!reply_buf)
4184 return -ENOMEM;
4185
4186 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4187 "rbd", "get_snapcontext", NULL, 0,
4188 reply_buf, size);
4189 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4190 if (ret < 0)
4191 goto out;
4192
4193 p = reply_buf;
4194 end = reply_buf + ret;
4195 ret = -ERANGE;
4196 ceph_decode_64_safe(&p, end, seq, out);
4197 ceph_decode_32_safe(&p, end, snap_count, out);
4198
4199 /*
4200 * Make sure the reported number of snapshot ids wouldn't go
4201 * beyond the end of our buffer. But before checking that,
4202 * make sure the computed size of the snapshot context we
4203 * allocate is representable in a size_t.
4204 */
4205 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4206 / sizeof (u64)) {
4207 ret = -EINVAL;
4208 goto out;
4209 }
4210 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4211 goto out;
4212 ret = 0;
4213
4214 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4215 if (!snapc) {
4216 ret = -ENOMEM;
4217 goto out;
4218 }
4219 snapc->seq = seq;
4220 for (i = 0; i < snap_count; i++)
4221 snapc->snaps[i] = ceph_decode_64(&p);
4222
4223 ceph_put_snap_context(rbd_dev->header.snapc);
4224 rbd_dev->header.snapc = snapc;
4225
4226 dout(" snap context seq = %llu, snap_count = %u\n",
4227 (unsigned long long)seq, (unsigned int)snap_count);
4228 out:
4229 kfree(reply_buf);
4230
4231 return ret;
4232 }
4233
4234 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4235 u64 snap_id)
4236 {
4237 size_t size;
4238 void *reply_buf;
4239 __le64 snapid;
4240 int ret;
4241 void *p;
4242 void *end;
4243 char *snap_name;
4244
4245 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4246 reply_buf = kmalloc(size, GFP_KERNEL);
4247 if (!reply_buf)
4248 return ERR_PTR(-ENOMEM);
4249
4250 snapid = cpu_to_le64(snap_id);
4251 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4252 "rbd", "get_snapshot_name",
4253 &snapid, sizeof (snapid),
4254 reply_buf, size);
4255 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4256 if (ret < 0) {
4257 snap_name = ERR_PTR(ret);
4258 goto out;
4259 }
4260
4261 p = reply_buf;
4262 end = reply_buf + ret;
4263 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4264 if (IS_ERR(snap_name))
4265 goto out;
4266
4267 dout(" snap_id 0x%016llx snap_name = %s\n",
4268 (unsigned long long)snap_id, snap_name);
4269 out:
4270 kfree(reply_buf);
4271
4272 return snap_name;
4273 }
4274
4275 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4276 {
4277 bool first_time = rbd_dev->header.object_prefix == NULL;
4278 int ret;
4279
4280 down_write(&rbd_dev->header_rwsem);
4281
4282 ret = rbd_dev_v2_image_size(rbd_dev);
4283 if (ret)
4284 goto out;
4285
4286 if (first_time) {
4287 ret = rbd_dev_v2_header_onetime(rbd_dev);
4288 if (ret)
4289 goto out;
4290 }
4291
4292 /*
4293 * If the image supports layering, get the parent info. We
4294 	 * need to probe the first time regardless.  Thereafter we
4295 	 * only need to do so if there's a parent, to see if it has
4296 * disappeared due to the mapped image getting flattened.
4297 */
4298 if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4299 (first_time || rbd_dev->parent_spec)) {
4300 bool warn;
4301
4302 ret = rbd_dev_v2_parent_info(rbd_dev);
4303 if (ret)
4304 goto out;
4305
4306 /*
4307 * Print a warning if this is the initial probe and
4308 * the image has a parent. Don't print it if the
4309 * image now being probed is itself a parent. We
4310 * can tell at this point because we won't know its
4311 * pool name yet (just its pool id).
4312 */
4313 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4314 if (first_time && warn)
4315 rbd_warn(rbd_dev, "WARNING: kernel layering "
4316 "is EXPERIMENTAL!");
4317 }
4318
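	/* a snapshot mapping never changes size; only a head mapping tracks the image size */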
4319 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4320 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4321 rbd_dev->mapping.size = rbd_dev->header.image_size;
4322
4323 ret = rbd_dev_v2_snap_context(rbd_dev);
4324 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4325 out:
4326 up_write(&rbd_dev->header_rwsem);
4327
4328 return ret;
4329 }
4330
4331 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4332 {
4333 struct device *dev;
4334 int ret;
4335
4336 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
4337
4338 dev = &rbd_dev->dev;
4339 dev->bus = &rbd_bus_type;
4340 dev->type = &rbd_device_type;
4341 dev->parent = &rbd_root_dev;
4342 dev->release = rbd_dev_device_release;
4343 dev_set_name(dev, "%d", rbd_dev->dev_id);
4344 ret = device_register(dev);
4345
4346 mutex_unlock(&ctl_mutex);
4347
4348 return ret;
4349 }
4350
4351 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4352 {
4353 device_unregister(&rbd_dev->dev);
4354 }
4355
4356 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
4357
4358 /*
4359 * Get a unique rbd identifier for the given new rbd_dev, and add
4360 * the rbd_dev to the global list. The minimum rbd id is 1.
4361 */
4362 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
4363 {
4364 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
4365
4366 spin_lock(&rbd_dev_list_lock);
4367 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4368 spin_unlock(&rbd_dev_list_lock);
4369 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
4370 (unsigned long long) rbd_dev->dev_id);
4371 }
4372
4373 /*
4374 * Remove an rbd_dev from the global list, and record that its
4375 * identifier is no longer in use.
4376 */
4377 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4378 {
4379 struct list_head *tmp;
4380 int rbd_id = rbd_dev->dev_id;
4381 int max_id;
4382
4383 rbd_assert(rbd_id > 0);
4384
4385 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
4386 (unsigned long long) rbd_dev->dev_id);
4387 spin_lock(&rbd_dev_list_lock);
4388 list_del_init(&rbd_dev->node);
4389
4390 /*
4391 * If the id being "put" is not the current maximum, there
4392 * is nothing special we need to do.
4393 */
4394 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
4395 spin_unlock(&rbd_dev_list_lock);
4396 return;
4397 }
4398
4399 /*
4400 * We need to update the current maximum id. Search the
4401 * list to find out what it is. We're more likely to find
4402 * the maximum at the end, so search the list backward.
4403 */
4404 max_id = 0;
4405 list_for_each_prev(tmp, &rbd_dev_list) {
4406 struct rbd_device *rbd_dev;
4407
4408 rbd_dev = list_entry(tmp, struct rbd_device, node);
4409 if (rbd_dev->dev_id > max_id)
4410 max_id = rbd_dev->dev_id;
4411 }
4412 spin_unlock(&rbd_dev_list_lock);
4413
4414 /*
4415 * The max id could have been updated by rbd_dev_id_get(), in
4416 * which case it now accurately reflects the new maximum.
4417 * Be careful not to overwrite the maximum value in that
4418 * case.
4419 */
4420 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
4421 dout(" max dev id has been reset\n");
4422 }
4423
4424 /*
4425 * Skips over white space at *buf, and updates *buf to point to the
4426 * first found non-space character (if any). Returns the length of
4427 * the token (string of non-white space characters) found. Note
4428 * that *buf must be terminated with '\0'.
4429 */
4430 static inline size_t next_token(const char **buf)
4431 {
4432 /*
4433 * These are the characters that produce nonzero for
4434 * isspace() in the "C" and "POSIX" locales.
4435 */
4436 const char *spaces = " \f\n\r\t\v";
4437
4438 *buf += strspn(*buf, spaces); /* Find start of token */
4439
4440 return strcspn(*buf, spaces); /* Return token length */
4441 }
4442
4443 /*
4444 * Finds the next token in *buf, and if the provided token buffer is
4445 * big enough, copies the found token into it. The result, if
4446 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4447 * must be terminated with '\0' on entry.
4448 *
4449 * Returns the length of the token found (not including the '\0').
4450 * Return value will be 0 if no token is found, and it will be >=
4451 * token_size if the token would not fit.
4452 *
4453 * The *buf pointer will be updated to point beyond the end of the
4454 * found token. Note that this occurs even if the token buffer is
4455 * too small to hold it.
4456 */
4457 static inline size_t copy_token(const char **buf,
4458 char *token,
4459 size_t token_size)
4460 {
4461 size_t len;
4462
4463 len = next_token(buf);
4464 if (len < token_size) {
4465 memcpy(token, *buf, len);
4466 *(token + len) = '\0';
4467 }
4468 *buf += len;
4469
4470 return len;
4471 }
4472
4473 /*
4474 * Finds the next token in *buf, dynamically allocates a buffer big
4475 * enough to hold a copy of it, and copies the token into the new
4476 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4477 * that a duplicate buffer is created even for a zero-length token.
4478 *
4479 * Returns a pointer to the newly-allocated duplicate, or a null
4480 * pointer if memory for the duplicate was not available. If
4481 * the lenp argument is a non-null pointer, the length of the token
4482 * (not including the '\0') is returned in *lenp.
4483 *
4484 * If successful, the *buf pointer will be updated to point beyond
4485 * the end of the found token.
4486 *
4487 * Note: uses GFP_KERNEL for allocation.
4488 */
4489 static inline char *dup_token(const char **buf, size_t *lenp)
4490 {
4491 char *dup;
4492 size_t len;
4493
4494 len = next_token(buf);
4495 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4496 if (!dup)
4497 return NULL;
4498 *(dup + len) = '\0';
4499 *buf += len;
4500
4501 if (lenp)
4502 *lenp = len;
4503
4504 return dup;
4505 }
4506
4507 /*
4508 * Parse the options provided for an "rbd add" (i.e., rbd image
4509 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4510 * and the data written is passed here via a NUL-terminated buffer.
4511 * Returns 0 if successful or an error code otherwise.
4512 *
4513 * The information extracted from these options is recorded in
4514 * the other parameters which return dynamically-allocated
4515 * structures:
4516 * ceph_opts
4517 * The address of a pointer that will refer to a ceph options
4518 * structure. Caller must release the returned pointer using
4519 * ceph_destroy_options() when it is no longer needed.
4520 * rbd_opts
4521 * Address of an rbd options pointer. Fully initialized by
4522 * this function; caller must release with kfree().
4523 * spec
4524 * Address of an rbd image specification pointer. Fully
4525 * initialized by this function based on parsed options.
4526 * Caller must release with rbd_spec_put().
4527 *
4528 * The options passed take this form:
4529 	 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4530 * where:
4531 * <mon_addrs>
4532 * A comma-separated list of one or more monitor addresses.
4533 * A monitor address is an ip address, optionally followed
4534 * by a port number (separated by a colon).
4535 * I.e.: ip1[:port1][,ip2[:port2]...]
4536 * <options>
4537 * A comma-separated list of ceph and/or rbd options.
4538 * <pool_name>
4539 * The name of the rados pool containing the rbd image.
4540 * <image_name>
4541 * The name of the image in that pool to map.
4542 	 *  <snap_name>
4543 	 *	An optional snapshot name.  If provided, the mapping will
4544 	 *	present data from the image at the time that snapshot was
4545 	 *	created.  The image head is used if no snapshot name is
4546 * provided. Snapshot mappings are always read-only.
4547 */
4548 static int rbd_add_parse_args(const char *buf,
4549 struct ceph_options **ceph_opts,
4550 struct rbd_options **opts,
4551 struct rbd_spec **rbd_spec)
4552 {
4553 size_t len;
4554 char *options;
4555 const char *mon_addrs;
4556 char *snap_name;
4557 size_t mon_addrs_size;
4558 struct rbd_spec *spec = NULL;
4559 struct rbd_options *rbd_opts = NULL;
4560 struct ceph_options *copts;
4561 int ret;
4562
4563 /* The first four tokens are required */
4564
4565 len = next_token(&buf);
4566 if (!len) {
4567 rbd_warn(NULL, "no monitor address(es) provided");
4568 return -EINVAL;
4569 }
4570 mon_addrs = buf;
4571 mon_addrs_size = len + 1;
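	/* mon_addrs is not NUL-terminated here; its end is passed to ceph_parse_options() as mon_addrs + mon_addrs_size - 1 below */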
4572 buf += len;
4573
4574 ret = -EINVAL;
4575 options = dup_token(&buf, NULL);
4576 if (!options)
4577 return -ENOMEM;
4578 if (!*options) {
4579 rbd_warn(NULL, "no options provided");
4580 goto out_err;
4581 }
4582
4583 spec = rbd_spec_alloc();
4584 if (!spec)
4585 goto out_mem;
4586
4587 spec->pool_name = dup_token(&buf, NULL);
4588 if (!spec->pool_name)
4589 goto out_mem;
4590 if (!*spec->pool_name) {
4591 rbd_warn(NULL, "no pool name provided");
4592 goto out_err;
4593 }
4594
4595 spec->image_name = dup_token(&buf, NULL);
4596 if (!spec->image_name)
4597 goto out_mem;
4598 if (!*spec->image_name) {
4599 rbd_warn(NULL, "no image name provided");
4600 goto out_err;
4601 }
4602
4603 /*
4604 * Snapshot name is optional; default is to use "-"
4605 * (indicating the head/no snapshot).
4606 */
4607 len = next_token(&buf);
4608 if (!len) {
4609 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4610 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4611 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4612 ret = -ENAMETOOLONG;
4613 goto out_err;
4614 }
4615 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4616 if (!snap_name)
4617 goto out_mem;
4618 *(snap_name + len) = '\0';
4619 spec->snap_name = snap_name;
4620
4621 /* Initialize all rbd options to the defaults */
4622
4623 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4624 if (!rbd_opts)
4625 goto out_mem;
4626
4627 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4628
4629 copts = ceph_parse_options(options, mon_addrs,
4630 mon_addrs + mon_addrs_size - 1,
4631 parse_rbd_opts_token, rbd_opts);
4632 if (IS_ERR(copts)) {
4633 ret = PTR_ERR(copts);
4634 goto out_err;
4635 }
4636 kfree(options);
4637
4638 *ceph_opts = copts;
4639 *opts = rbd_opts;
4640 *rbd_spec = spec;
4641
4642 return 0;
4643 out_mem:
4644 ret = -ENOMEM;
4645 out_err:
4646 kfree(rbd_opts);
4647 rbd_spec_put(spec);
4648 kfree(options);
4649
4650 return ret;
4651 }
4652
4653 /*
4654 * An rbd format 2 image has a unique identifier, distinct from the
4655 * name given to it by the user. Internally, that identifier is
4656 * what's used to specify the names of objects related to the image.
4657 *
4658 * A special "rbd id" object is used to map an rbd image name to its
4659 * id. If that object doesn't exist, then there is no v2 rbd image
4660 * with the supplied name.
4661 *
4662 * This function will record the given rbd_dev's image_id field if
4663 * it can be determined, and in that case will return 0. If any
4664 * errors occur a negative errno will be returned and the rbd_dev's
4665 * image_id field will be unchanged (and should be NULL).
4666 */
4667 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
4668 {
4669 int ret;
4670 size_t size;
4671 char *object_name;
4672 void *response;
4673 char *image_id;
4674
4675 /*
4676 * When probing a parent image, the image id is already
4677 * known (and the image name likely is not). There's no
4678 * need to fetch the image id again in this case. We
4679 * do still need to set the image format though.
4680 */
4681 if (rbd_dev->spec->image_id) {
4682 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
4683
4684 return 0;
4685 }
4686
4687 /*
4688 * First, see if the format 2 image id file exists, and if
4689 * so, get the image's persistent id from it.
4690 */
4691 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
4692 object_name = kmalloc(size, GFP_NOIO);
4693 if (!object_name)
4694 return -ENOMEM;
4695 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
4696 dout("rbd id object name is %s\n", object_name);
4697
4698 /* Response will be an encoded string, which includes a length */
4699
4700 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
4701 response = kzalloc(size, GFP_NOIO);
4702 if (!response) {
4703 ret = -ENOMEM;
4704 goto out;
4705 }
4706
4707 /* If it doesn't exist we'll assume it's a format 1 image */
4708
4709 ret = rbd_obj_method_sync(rbd_dev, object_name,
4710 "rbd", "get_id", NULL, 0,
4711 response, RBD_IMAGE_ID_LEN_MAX);
4712 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4713 if (ret == -ENOENT) {
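		/* an empty image id doubles as the marker for a format 1 image */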
4714 image_id = kstrdup("", GFP_KERNEL);
4715 ret = image_id ? 0 : -ENOMEM;
4716 if (!ret)
4717 rbd_dev->image_format = 1;
4718 } else if (ret > sizeof (__le32)) {
4719 void *p = response;
4720
4721 image_id = ceph_extract_encoded_string(&p, p + ret,
4722 NULL, GFP_NOIO);
4723 ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
4724 if (!ret)
4725 rbd_dev->image_format = 2;
4726 } else {
4727 ret = -EINVAL;
4728 }
4729
4730 if (!ret) {
4731 rbd_dev->spec->image_id = image_id;
4732 dout("image_id is %s\n", image_id);
4733 }
4734 out:
4735 kfree(response);
4736 kfree(object_name);
4737
4738 return ret;
4739 }
4740
4741 /*
4742  * Undo whatever state changes are made by a v1 or v2 header info
4743 * call.
4744 */
4745 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4746 {
4747 struct rbd_image_header *header;
4748
4749 /* Drop parent reference unless it's already been done (or none) */
4750
4751 if (rbd_dev->parent_overlap)
4752 rbd_dev_parent_put(rbd_dev);
4753
4754 /* Free dynamic fields from the header, then zero it out */
4755
4756 header = &rbd_dev->header;
4757 ceph_put_snap_context(header->snapc);
4758 kfree(header->snap_sizes);
4759 kfree(header->snap_names);
4760 kfree(header->object_prefix);
4761 memset(header, 0, sizeof (*header));
4762 }
4763
4764 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
4765 {
4766 int ret;
4767
4768 ret = rbd_dev_v2_object_prefix(rbd_dev);
4769 if (ret)
4770 goto out_err;
4771
4772 	 * Get and check the features for the image.  Currently the
4773 * Get the and check features for the image. Currently the
4774 * features are assumed to never change.
4775 */
4776 ret = rbd_dev_v2_features(rbd_dev);
4777 if (ret)
4778 goto out_err;
4779
4780 /* If the image supports fancy striping, get its parameters */
4781
4782 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
4783 ret = rbd_dev_v2_striping_info(rbd_dev);
4784 if (ret < 0)
4785 goto out_err;
4786 }
4787 	/* No support for crypto or compression types in format 2 images */
4788
4789 return 0;
4790 out_err:
4791 rbd_dev->header.features = 0;
4792 kfree(rbd_dev->header.object_prefix);
4793 rbd_dev->header.object_prefix = NULL;
4794
4795 return ret;
4796 }
4797
4798 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
4799 {
4800 struct rbd_device *parent = NULL;
4801 struct rbd_spec *parent_spec;
4802 struct rbd_client *rbdc;
4803 int ret;
4804
4805 if (!rbd_dev->parent_spec)
4806 return 0;
4807 /*
4808 * We need to pass a reference to the client and the parent
4809 * spec when creating the parent rbd_dev. Images related by
4810 * parent/child relationships always share both.
4811 */
4812 parent_spec = rbd_spec_get(rbd_dev->parent_spec);
4813 rbdc = __rbd_get_client(rbd_dev->rbd_client);
4814
4815 ret = -ENOMEM;
4816 parent = rbd_dev_create(rbdc, parent_spec);
4817 if (!parent)
4818 goto out_err;
4819
4820 ret = rbd_dev_image_probe(parent, false);
4821 if (ret < 0)
4822 goto out_err;
4823 rbd_dev->parent = parent;
4824 atomic_set(&rbd_dev->parent_ref, 1);
4825
4826 return 0;
4827 out_err:
4828 if (parent) {
4829 rbd_dev_unparent(rbd_dev);
4830 kfree(rbd_dev->header_name);
4831 rbd_dev_destroy(parent);
4832 } else {
4833 rbd_put_client(rbdc);
4834 rbd_spec_put(parent_spec);
4835 }
4836
4837 return ret;
4838 }
4839
4840 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
4841 {
4842 int ret;
4843
4844 /* generate unique id: find highest unique id, add one */
4845 rbd_dev_id_get(rbd_dev);
4846
4847 /* Fill in the device name, now that we have its id. */
4848 BUILD_BUG_ON(DEV_NAME_LEN
4849 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
4850 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
4851
4852 /* Get our block major device number. */
4853
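	/* a major of 0 asks the block layer to assign one dynamically */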
4854 ret = register_blkdev(0, rbd_dev->name);
4855 if (ret < 0)
4856 goto err_out_id;
4857 rbd_dev->major = ret;
4858
4859 /* Set up the blkdev mapping. */
4860
4861 ret = rbd_init_disk(rbd_dev);
4862 if (ret)
4863 goto err_out_blkdev;
4864
4865 ret = rbd_dev_mapping_set(rbd_dev);
4866 if (ret)
4867 goto err_out_disk;
4868 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
4869
4870 ret = rbd_bus_add_dev(rbd_dev);
4871 if (ret)
4872 goto err_out_mapping;
4873
4874 /* Everything's ready. Announce the disk to the world. */
4875
4876 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4877 add_disk(rbd_dev->disk);
4878
4879 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
4880 (unsigned long long) rbd_dev->mapping.size);
4881
4882 return ret;
4883
4884 err_out_mapping:
4885 rbd_dev_mapping_clear(rbd_dev);
4886 err_out_disk:
4887 rbd_free_disk(rbd_dev);
4888 err_out_blkdev:
4889 unregister_blkdev(rbd_dev->major, rbd_dev->name);
4890 err_out_id:
4891 rbd_dev_id_put(rbd_dev);
4892 rbd_dev_mapping_clear(rbd_dev);
4893
4894 return ret;
4895 }
4896
4897 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
4898 {
4899 struct rbd_spec *spec = rbd_dev->spec;
4900 size_t size;
4901
4902 /* Record the header object name for this rbd image. */
4903
4904 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4905
4906 if (rbd_dev->image_format == 1)
4907 size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
4908 else
4909 size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
4910
4911 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
4912 if (!rbd_dev->header_name)
4913 return -ENOMEM;
4914
4915 if (rbd_dev->image_format == 1)
4916 sprintf(rbd_dev->header_name, "%s%s",
4917 spec->image_name, RBD_SUFFIX);
4918 else
4919 sprintf(rbd_dev->header_name, "%s%s",
4920 RBD_HEADER_PREFIX, spec->image_id);
4921 return 0;
4922 }
4923
4924 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
4925 {
4926 rbd_dev_unprobe(rbd_dev);
4927 kfree(rbd_dev->header_name);
4928 rbd_dev->header_name = NULL;
4929 rbd_dev->image_format = 0;
4930 kfree(rbd_dev->spec->image_id);
4931 rbd_dev->spec->image_id = NULL;
4932
4933 rbd_dev_destroy(rbd_dev);
4934 }
4935
4936 /*
4937 * Probe for the existence of the header object for the given rbd
4938 * device. If this image is the one being mapped (i.e., not a
4939 * parent), initiate a watch on its header object before using that
4940 * object to get detailed information about the rbd image.
4941 */
4942 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
4943 {
4944 int ret;
4945 int tmp;
4946
4947 /*
4948 * Get the id from the image id object. Unless there's an
4949 * error, rbd_dev->spec->image_id will be filled in with
4950 * a dynamically-allocated string, and rbd_dev->image_format
4951 * will be set to either 1 or 2.
4952 */
4953 ret = rbd_dev_image_id(rbd_dev);
4954 if (ret)
4955 return ret;
4956 rbd_assert(rbd_dev->spec->image_id);
4957 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4958
4959 ret = rbd_dev_header_name(rbd_dev);
4960 if (ret)
4961 goto err_out_format;
4962
4963 if (mapping) {
4964 ret = rbd_dev_header_watch_sync(rbd_dev, true);
4965 if (ret)
4966 goto out_header_name;
4967 }
4968
4969 if (rbd_dev->image_format == 1)
4970 ret = rbd_dev_v1_header_info(rbd_dev);
4971 else
4972 ret = rbd_dev_v2_header_info(rbd_dev);
4973 if (ret)
4974 goto err_out_watch;
4975
4976 ret = rbd_dev_spec_update(rbd_dev);
4977 if (ret)
4978 goto err_out_probe;
4979
4980 ret = rbd_dev_probe_parent(rbd_dev);
4981 if (ret)
4982 goto err_out_probe;
4983
4984 dout("discovered format %u image, header name is %s\n",
4985 rbd_dev->image_format, rbd_dev->header_name);
4986
4987 return 0;
4988 err_out_probe:
4989 rbd_dev_unprobe(rbd_dev);
4990 err_out_watch:
4991 if (mapping) {
4992 tmp = rbd_dev_header_watch_sync(rbd_dev, false);
4993 if (tmp)
4994 rbd_warn(rbd_dev, "unable to tear down "
4995 "watch request (%d)\n", tmp);
4996 }
4997 out_header_name:
4998 kfree(rbd_dev->header_name);
4999 rbd_dev->header_name = NULL;
5000 err_out_format:
5001 rbd_dev->image_format = 0;
5002 kfree(rbd_dev->spec->image_id);
5003 rbd_dev->spec->image_id = NULL;
5004
5005 dout("probe failed, returning %d\n", ret);
5006
5007 return ret;
5008 }
5009
5010 static ssize_t rbd_add(struct bus_type *bus,
5011 const char *buf,
5012 size_t count)
5013 {
5014 struct rbd_device *rbd_dev = NULL;
5015 struct ceph_options *ceph_opts = NULL;
5016 struct rbd_options *rbd_opts = NULL;
5017 struct rbd_spec *spec = NULL;
5018 struct rbd_client *rbdc;
5019 struct ceph_osd_client *osdc;
5020 bool read_only;
5021 int rc = -ENOMEM;
5022
5023 if (!try_module_get(THIS_MODULE))
5024 return -ENODEV;
5025
5026 /* parse add command */
5027 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
5028 if (rc < 0)
5029 goto err_out_module;
5030 read_only = rbd_opts->read_only;
5031 kfree(rbd_opts);
5032 rbd_opts = NULL; /* done with this */
5033
5034 rbdc = rbd_get_client(ceph_opts);
5035 if (IS_ERR(rbdc)) {
5036 rc = PTR_ERR(rbdc);
5037 goto err_out_args;
5038 }
5039
5040 /* pick the pool */
5041 osdc = &rbdc->client->osdc;
5042 rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
5043 if (rc < 0)
5044 goto err_out_client;
5045 spec->pool_id = (u64)rc;
5046
5047 /* The ceph file layout needs to fit pool id in 32 bits */
5048
5049 if (spec->pool_id > (u64)U32_MAX) {
5050 rbd_warn(NULL, "pool id too large (%llu > %u)\n",
5051 (unsigned long long)spec->pool_id, U32_MAX);
5052 rc = -EIO;
5053 goto err_out_client;
5054 }
5055
5056 rbd_dev = rbd_dev_create(rbdc, spec);
5057 if (!rbd_dev)
5058 goto err_out_client;
5059 rbdc = NULL; /* rbd_dev now owns this */
5060 spec = NULL; /* rbd_dev now owns this */
5061
5062 rc = rbd_dev_image_probe(rbd_dev, true);
5063 if (rc < 0)
5064 goto err_out_rbd_dev;
5065
5066 /* If we are mapping a snapshot it must be marked read-only */
5067
5068 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5069 read_only = true;
5070 rbd_dev->mapping.read_only = read_only;
5071
5072 rc = rbd_dev_device_setup(rbd_dev);
5073 if (rc) {
5074 rbd_dev_image_release(rbd_dev);
5075 goto err_out_module;
5076 }
5077
5078 return count;
5079
5080 err_out_rbd_dev:
5081 rbd_dev_destroy(rbd_dev);
5082 err_out_client:
5083 rbd_put_client(rbdc);
5084 err_out_args:
5085 rbd_spec_put(spec);
5086 err_out_module:
5087 module_put(THIS_MODULE);
5088
5089 dout("Error adding device %s\n", buf);
5090
5091 return (ssize_t)rc;
5092 }
5093
5094 static void rbd_dev_device_release(struct device *dev)
5095 {
5096 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5097
5098 rbd_free_disk(rbd_dev);
5099 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5100 rbd_dev_mapping_clear(rbd_dev);
5101 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5102 rbd_dev->major = 0;
5103 rbd_dev_id_put(rbd_dev);
5104 rbd_dev_mapping_clear(rbd_dev);
5105 }
5106
5107 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5108 {
5109 while (rbd_dev->parent) {
5110 struct rbd_device *first = rbd_dev;
5111 struct rbd_device *second = first->parent;
5112 struct rbd_device *third;
5113
5114 /*
5115 * Follow to the parent with no grandparent and
5116 * remove it.
5117 */
5118 while (second && (third = second->parent)) {
5119 first = second;
5120 second = third;
5121 }
5122 rbd_assert(second);
5123 rbd_dev_image_release(second);
5124 first->parent = NULL;
5125 first->parent_overlap = 0;
5126
5127 rbd_assert(first->parent_spec);
5128 rbd_spec_put(first->parent_spec);
5129 first->parent_spec = NULL;
5130 }
5131 }
5132
5133 static ssize_t rbd_remove(struct bus_type *bus,
5134 const char *buf,
5135 size_t count)
5136 {
5137 struct rbd_device *rbd_dev = NULL;
5138 struct list_head *tmp;
5139 int dev_id;
5140 unsigned long ul;
5141 bool already = false;
5142 int ret;
5143
5144 ret = strict_strtoul(buf, 10, &ul);
5145 if (ret)
5146 return ret;
5147
5148 /* convert to int; abort if we lost anything in the conversion */
5149 dev_id = (int)ul;
5150 if (dev_id != ul)
5151 return -EINVAL;
5152
5153 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
5154
5155 ret = -ENOENT;
5156 spin_lock(&rbd_dev_list_lock);
5157 list_for_each(tmp, &rbd_dev_list) {
5158 rbd_dev = list_entry(tmp, struct rbd_device, node);
5159 if (rbd_dev->dev_id == dev_id) {
5160 ret = 0;
5161 break;
5162 }
5163 }
5164 if (!ret) {
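		/* refuse removal while the device is still open; otherwise mark it as being removed */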
5165 spin_lock_irq(&rbd_dev->lock);
5166 if (rbd_dev->open_count)
5167 ret = -EBUSY;
5168 else
5169 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
5170 &rbd_dev->flags);
5171 spin_unlock_irq(&rbd_dev->lock);
5172 }
5173 spin_unlock(&rbd_dev_list_lock);
5174 if (ret < 0 || already)
5175 goto done;
5176
5177 ret = rbd_dev_header_watch_sync(rbd_dev, false);
5178 if (ret)
5179 rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
5180
5181 /*
5182 * flush remaining watch callbacks - these must be complete
5183 	 * before the osd_client is shut down
5184 */
5185 dout("%s: flushing notifies", __func__);
5186 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
5187 /*
5188 * Don't free anything from rbd_dev->disk until after all
5189 * notifies are completely processed. Otherwise
5190 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
5191 * in a potential use after free of rbd_dev->disk or rbd_dev.
5192 */
5193 rbd_bus_del_dev(rbd_dev);
5194 rbd_dev_image_release(rbd_dev);
5195 module_put(THIS_MODULE);
5196 ret = count;
5197 done:
5198 mutex_unlock(&ctl_mutex);
5199
5200 return ret;
5201 }
5202
5203 /*
5204 * create control files in sysfs
5205 * /sys/bus/rbd/...
5206 */
5207 static int rbd_sysfs_init(void)
5208 {
5209 int ret;
5210
5211 ret = device_register(&rbd_root_dev);
5212 if (ret < 0)
5213 return ret;
5214
5215 ret = bus_register(&rbd_bus_type);
5216 if (ret < 0)
5217 device_unregister(&rbd_root_dev);
5218
5219 return ret;
5220 }
5221
5222 static void rbd_sysfs_cleanup(void)
5223 {
5224 bus_unregister(&rbd_bus_type);
5225 device_unregister(&rbd_root_dev);
5226 }
5227
5228 static int rbd_slab_init(void)
5229 {
5230 rbd_assert(!rbd_img_request_cache);
5231 rbd_img_request_cache = kmem_cache_create("rbd_img_request",
5232 sizeof (struct rbd_img_request),
5233 __alignof__(struct rbd_img_request),
5234 0, NULL);
5235 if (!rbd_img_request_cache)
5236 return -ENOMEM;
5237
5238 rbd_assert(!rbd_obj_request_cache);
5239 rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
5240 sizeof (struct rbd_obj_request),
5241 __alignof__(struct rbd_obj_request),
5242 0, NULL);
5243 if (!rbd_obj_request_cache)
5244 goto out_err;
5245
5246 rbd_assert(!rbd_segment_name_cache);
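	/* segment (object) names are NUL-terminated strings, hence the extra byte and byte alignment */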
5247 rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
5248 MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
5249 if (rbd_segment_name_cache)
5250 return 0;
5251 out_err:
5252 if (rbd_obj_request_cache) {
5253 kmem_cache_destroy(rbd_obj_request_cache);
5254 rbd_obj_request_cache = NULL;
5255 }
5256
5257 kmem_cache_destroy(rbd_img_request_cache);
5258 rbd_img_request_cache = NULL;
5259
5260 return -ENOMEM;
5261 }
5262
5263 static void rbd_slab_exit(void)
5264 {
5265 rbd_assert(rbd_segment_name_cache);
5266 kmem_cache_destroy(rbd_segment_name_cache);
5267 rbd_segment_name_cache = NULL;
5268
5269 rbd_assert(rbd_obj_request_cache);
5270 kmem_cache_destroy(rbd_obj_request_cache);
5271 rbd_obj_request_cache = NULL;
5272
5273 rbd_assert(rbd_img_request_cache);
5274 kmem_cache_destroy(rbd_img_request_cache);
5275 rbd_img_request_cache = NULL;
5276 }
5277
5278 static int __init rbd_init(void)
5279 {
5280 int rc;
5281
5282 if (!libceph_compatible(NULL)) {
5283 rbd_warn(NULL, "libceph incompatibility (quitting)");
5284
5285 return -EINVAL;
5286 }
5287 rc = rbd_slab_init();
5288 if (rc)
5289 return rc;
5290 rc = rbd_sysfs_init();
5291 if (rc)
5292 rbd_slab_exit();
5293 else
5294 pr_info("loaded " RBD_DRV_NAME_LONG "\n");
5295
5296 return rc;
5297 }
5298
5299 static void __exit rbd_exit(void)
5300 {
5301 rbd_sysfs_cleanup();
5302 rbd_slab_exit();
5303 }
5304
5305 module_init(rbd_init);
5306 module_exit(rbd_exit);
5307
5308 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
5309 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
5310 MODULE_DESCRIPTION("rados block device");
5311
5312 /* following authorship retained from original osdblk.c */
5313 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
5314
5315 MODULE_LICENSE("GPL");