/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>

#include <linux/blkdev.h>
#include <linux/slab.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
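
/*
 * Illustrative usage of the two saturating helpers above (a sketch,
 * not driver code; the counter name is hypothetical).  It is the same
 * pattern rbd_dev_parent_get()/rbd_dev_parent_put() apply to
 * parent_ref later in this file: an increment that refuses to
 * resurrect a counter that has dropped to zero, and a decrement that
 * reports underflow instead of wrapping.
 *
 *	static atomic_t example_ref = ATOMIC_INIT(1);
 *
 *	if (atomic_inc_return_safe(&example_ref) > 0) {
 *		...use the referenced object...
 *		if (atomic_dec_return_safe(&example_ref) < 0)
 *			pr_warn("example_ref underflow\n");
 *	}
 */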

#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
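
/*
 * Why (5 * sizeof (int)) / 2 + 1 is wide enough (a sanity check, not
 * driver code): each byte of an integer contributes log10(256), i.e.
 * just under 2.5, decimal digits, and 5/2 per byte rounds that up;
 * the +1 leaves room for a minus sign.  For a 4-byte int this gives
 * (5 * 4) / 2 + 1 = 11 characters, exactly enough for "-2147483648".
 */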

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
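
/*
 * Illustrative only: walking an image request's object requests with
 * the wrappers above.  This mirrors how rbd_img_request_complete()
 * (below) totals the per-object transfer counts.
 *
 *	struct rbd_obj_request *obj_request;
 *	u64 total = 0;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		total += obj_request->xferred;
 */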

struct rbd_mapping {
	u64                     size;
	u64                     features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	struct ceph_file_layout	layout;

	struct ceph_osd_event   *watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);

static struct bus_attribute rbd_bus_attrs[] = {
	__ATTR(add, S_IWUSR, NULL, rbd_add),
	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
	__ATTR_NULL
};

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_attrs	= rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);
	mutex_unlock(&ctl_mutex);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	put_device(&rbd_dev->dev);
	mutex_unlock(&ctl_mutex);
}

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_mutex;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_err;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	mutex_unlock(&ctl_mutex);
	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;

out_err:
	ceph_destroy_client(rbdc->client);
out_mutex:
	mutex_unlock(&ctl_mutex);
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);

	return rbdc;
}

/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	down_write(&rbd_dev->header_rwsem);
	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	/* Make sure mapping size is consistent with header info */

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
		if (rbd_dev->mapping.size != header->image_size)
			rbd_dev->mapping.size = header->image_size;

	up_write(&rbd_dev->header_rwsem);

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
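
/*
 * Worked example (not driver code): with snapshots kept in descending
 * order, e.g. snaps[] = { 9, 7, 4 }, a key of 7 compares against 9 as
 * 1 (search to the right of it), against 4 as -1 (search to the
 * left), and against 7 as 0, so bsearch() lands on index 1.  An
 * ordinary ascending comparator would never find it in this array.
 */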

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		/* name came from the slab cache, so free it back there */
		kmem_cache_free(rbd_segment_name_cache, name);
		name = NULL;
	}

	return name;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return (u64) 1 << header->obj_order;
}

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec *bv;
	unsigned long flags;
	void *buf;
	int i;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
				       bv->bv_len - remainder);
				flush_dcache_page(bv->bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv->bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = (size_t)(offset & ~PAGE_MASK);
		length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}

/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio_vec *bv;
	unsigned int resid;
	unsigned short idx;
	unsigned int voff;		/* vec byte offset */
	unsigned short end_idx;
	unsigned short vcnt;
	struct bio *bio;

	/* Handle the easy case for the caller */

	if (!offset && len == bio_src->bi_size)
		return bio_clone(bio_src, gfpmask);

	if (WARN_ON_ONCE(!len))
		return NULL;
	if (WARN_ON_ONCE(len > bio_src->bi_size))
		return NULL;
	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
		return NULL;

	/* Find first affected segment... */

	resid = offset;
	bio_for_each_segment(bv, bio_src, idx) {
		if (resid < bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	voff = resid;

	/* ...and the last affected segment */

	resid += len;
	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
		if (resid <= bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	vcnt = end_idx - idx + 1;

	/* Build the clone */

	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_flags |= 1 << BIO_CLONED;

	/*
	 * Copy over our part of the bio_vec, then update the first
	 * and last (or only) entries.
	 */
	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
			vcnt * sizeof (struct bio_vec));
	bio->bi_io_vec[0].bv_offset += voff;
	if (vcnt > 1) {
		bio->bi_io_vec[0].bv_len -= voff;
		bio->bi_io_vec[vcnt - 1].bv_len = resid;
	} else {
		bio->bi_io_vec[0].bv_len = len;
	}

	bio->bi_vcnt = vcnt;
	bio->bi_size = len;
	bio->bi_idx = 0;

	return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
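
/*
 * Usage sketch for bio_chain_clone_range() (illustrative): because
 * bio_src and offset are in-out, successive calls peel consecutive
 * byte ranges off one source chain, which is exactly how
 * rbd_img_request_fill() carves a block request into per-object
 * clones:
 *
 *	struct bio *bio_list = rq_bio;		// head of source chain
 *	unsigned int bio_offset = 0;
 *
 *	clone1 = bio_chain_clone_range(&bio_list, &bio_offset,
 *					len1, GFP_ATOMIC);
 *	clone2 = bio_chain_clone_range(&bio_list, &bio_offset,
 *					len2, GFP_ATOMIC);
 *	// clone2 starts at byte len1 of the original chain
 */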

/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done\n",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}

static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	return wait_for_completion_interruptible(&obj_request->completion);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}
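
/*
 * Example of the zero-fill rules above (not driver code): for a read
 * of length 0x2000 that returns xferred == 0x800 with no error, bytes
 * 0x800..0x1fff are zeroed and xferred is reported as the full
 * 0x2000; for a read that fails with -ENOENT (a hole in a layered
 * image), the whole 0x2000 bytes are zeroed and the result is cleared
 * to 0 so the block layer sees a successful read of zeroes.
 */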

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	BUG_ON(osd_req->r_num_ops > 2);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);
	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}

static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}

static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request)) {
		struct rbd_img_request *img_request = obj_request->img_request;

		rbd_assert(write_request ==
				img_request_write_test(img_request));
		if (write_request)
			snapc = img_request->snapc;
	}

	/* Allocate and initialize the request, for the single op */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (write_request)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}

/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has two osd ops,
 * a copyup method call, and a "normal" write request.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request));

	/* Allocate and initialize the request, for the two ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_KERNEL);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
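
/*
 * Lifecycle sketch for a standalone object request (illustrative,
 * under the assumption of a synchronous caller; standalone means
 * which == BAD_WHICH and no img_request, per the struct comment
 * above):
 *
 *	obj_request = rbd_obj_request_create(name, 0, 0,
 *						OBJ_REQUEST_NODATA);
 *	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
 *						obj_request);
 *	...set up the osd op...
 *	ret = rbd_obj_request_submit(osdc, obj_request);
 *	if (!ret)
 *		ret = rbd_obj_request_wait(obj_request);
 *	rbd_obj_request_put(obj_request);
 */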

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow\n");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * We must get the reference before checking for the overlap to
 * coordinate properly with zeroing the parent overlap in
 * rbd_dev_v2_parent_info() when an image gets flattened.  We
 * drop it again if there is no overlap.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return false;

	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	if (counter > 0 && rbd_dev->parent_overlap)
		return true;

	/* Image was flattened, but parent is not yet torn down */

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow\n");

	return false;
}
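
/*
 * Pairing sketch (illustrative): rbd_img_request_create() calls
 * rbd_dev_parent_get() and marks the request layered only when it
 * returns true; rbd_img_request_destroy() later undoes that with
 * rbd_dev_parent_put().  The parent's spec and device thus stay
 * alive exactly as long as some image request might still read
 * from the parent:
 *
 *	if (rbd_dev_parent_get(rbd_dev))
 *		img_request_layered_set(img_request);
 *	...
 *	if (img_request_layered_test(img_request)) {
 *		img_request_layered_clear(img_request);
 *		rbd_dev_parent_put(img_request->rbd_dev);
 *	}
 */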

/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					bool write_request)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
	if (!img_request)
		return NULL;

	if (write_request) {
		down_read(&rbd_dev->header_rwsem);
		ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
	}

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (write_request) {
		img_request_write_set(img_request);
		img_request->snapc = rbd_dev->header.snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		write_request ? "write" : "read", offset, length,
		img_request);

	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}

static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent,
						img_offset, length, false);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}

static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
			img_request_write_test(img_request) ? "write" : "read",
			obj_request->length, obj_request->img_offset,
			obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x\n",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);
		more = blk_end_request(img_request->rq, result, xferred);
	}

	return more;
}

static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);
	rbd_assert(which >= img_request->next_completion);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);
	rbd_img_request_put(img_request);

	if (!more)
		rbd_img_request_complete(img_request);
}
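
/*
 * Completion-ordering example (not driver code): object requests may
 * complete in any order, but the block layer is notified in list
 * order.  If requests 0..3 are outstanding with next_completion == 0
 * and request 2 finishes first, the callback above records nothing
 * (2 != 0) and just drops its reference.  When request 0 later
 * finishes, the loop ends request 0 and advances next_completion;
 * each remaining request is then ended as soon as all of its
 * predecessors are done.
 */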
/*
 * Split up an image request into one or more object requests, each
 * to a different object.  The "type" parameter indicates whether
 * "data_desc" is the pointer to the head of a list of bio
 * structures, or the base of a page array.  In either case this
 * function assumes data_desc describes memory sufficient to hold
 * all data described by the image request.
 */
static int rbd_img_request_fill(struct rbd_img_request *img_request,
					enum obj_request_type type,
					void *data_desc)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	bool write_request = img_request_write_test(img_request);
	struct bio *bio_list = NULL;
	unsigned int bio_offset = 0;
	struct page **pages = NULL;
	u64 img_offset;
	u64 resid;
	u16 opcode;

	dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
		(int)type, data_desc);

	opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
	img_offset = img_request->offset;
	resid = img_request->length;
	rbd_assert(resid > 0);

	if (type == OBJ_REQUEST_BIO) {
		bio_list = data_desc;
		rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
	} else {
		rbd_assert(type == OBJ_REQUEST_PAGES);
		pages = data_desc;
	}

	while (resid) {
		struct ceph_osd_request *osd_req;
		const char *object_name;
		u64 offset;
		u64 length;

		object_name = rbd_segment_name(rbd_dev, img_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, img_offset);
		length = rbd_segment_length(rbd_dev, img_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length, type);
		/* object request has its own copy of the object name */
		rbd_segment_name_free(object_name);
		if (!obj_request)
			goto out_unwind;
		/*
		 * set obj_request->img_request before creating the
		 * osd_request so that it gets the right snapc
		 */
		rbd_img_obj_request_add(img_request, obj_request);

		if (type == OBJ_REQUEST_BIO) {
			unsigned int clone_size;

			rbd_assert(length <= (u64)UINT_MAX);
			clone_size = (unsigned int)length;
			obj_request->bio_list =
					bio_chain_clone_range(&bio_list,
								&bio_offset,
								clone_size,
								GFP_ATOMIC);
			if (!obj_request->bio_list)
				goto out_partial;
		} else {
			unsigned int page_count;

			obj_request->pages = pages;
			page_count = (u32)calc_pages_for(offset, length);
			obj_request->page_count = page_count;
			if ((offset + length) & ~PAGE_MASK)
				page_count--;	/* more on last page */
			pages += page_count;
		}

		osd_req = rbd_osd_req_create(rbd_dev, write_request,
						obj_request);
		if (!osd_req)
			goto out_partial;
		obj_request->osd_req = osd_req;
		obj_request->callback = rbd_img_obj_callback;
		rbd_img_request_get(img_request);

		osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
						0, 0);
		if (type == OBJ_REQUEST_BIO)
			osd_req_op_extent_osd_data_bio(osd_req, 0,
					obj_request->bio_list, length);
		else
			osd_req_op_extent_osd_data_pages(osd_req, 0,
					obj_request->pages, length,
					offset & ~PAGE_MASK, false, false);

		if (write_request)
			rbd_osd_req_format_write(obj_request);
		else
			rbd_osd_req_format_read(obj_request);

		obj_request->img_offset = img_offset;

		img_offset += length;
		resid -= length;
	}

	return 0;

out_partial:
	rbd_obj_request_put(obj_request);
out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	return -ENOMEM;
}
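/*
 * Illustrative sketch (not compiled): how the loop above chops one
 * image extent into per-object extents, assuming the default striping
 * layout in which each backing object holds 1 << obj_order bytes.
 * The helper name is hypothetical; rbd_segment_offset() and
 * rbd_segment_length() implement the same arithmetic.
 */
#if 0
static void example_split_extent(u64 img_offset, u64 resid, u8 obj_order)
{
	u64 obj_size = (u64)1 << obj_order;	/* e.g. 4 MiB for order 22 */

	while (resid) {
		u64 offset = img_offset & (obj_size - 1); /* within object */
		u64 length = min(resid, obj_size - offset);

		/* one object request covers [offset, offset + length) */
		img_offset += length;
		resid -= length;
	}
}
#endif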
static void
rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;
	struct page **pages;
	u32 page_count;

	rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);

	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev);

	pages = obj_request->copyup_pages;
	rbd_assert(pages != NULL);
	obj_request->copyup_pages = NULL;
	page_count = obj_request->copyup_page_count;
	rbd_assert(page_count);
	obj_request->copyup_page_count = 0;
	ceph_release_page_vector(pages, page_count);

	/*
	 * We want the transfer count to reflect the size of the
	 * original write request.  There is no such thing as a
	 * successful short write, so if the request was successful
	 * we can just set it to the originally-requested length.
	 */
	if (!obj_request->result)
		obj_request->xferred = obj_request->length;

	/* Finish up with the normal image object callback */

	rbd_img_obj_callback(obj_request);
}
static void
rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *orig_request;
	struct ceph_osd_request *osd_req;
	struct ceph_osd_client *osdc;
	struct rbd_device *rbd_dev;
	struct page **pages;
	u32 page_count;
	int img_result;
	u64 parent_length;
	u64 offset;
	u64 length;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request */

	pages = img_request->copyup_pages;
	rbd_assert(pages != NULL);
	img_request->copyup_pages = NULL;
	page_count = img_request->copyup_page_count;
	rbd_assert(page_count);
	img_request->copyup_page_count = 0;

	orig_request = img_request->obj_request;
	rbd_assert(orig_request != NULL);
	rbd_assert(obj_request_type_valid(orig_request->type));
	img_result = img_request->result;
	parent_length = img_request->length;
	rbd_assert(parent_length == img_request->xferred);
	rbd_img_request_put(img_request);

	rbd_assert(orig_request->img_request);
	rbd_dev = orig_request->img_request->rbd_dev;
	rbd_assert(rbd_dev);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		ceph_release_page_vector(pages, page_count);
		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, orig_request);
		if (!img_result)
			return;
	}

	if (img_result)
		goto out_err;

	/*
	 * The original osd request is of no use to us any more.
	 * We need a new one that can hold the two ops in a copyup
	 * request.  Allocate the new copyup osd request for the
	 * original request, and release the old one.
	 */
	img_result = -ENOMEM;
	osd_req = rbd_osd_req_create_copyup(orig_request);
	if (!osd_req)
		goto out_err;
	rbd_osd_req_destroy(orig_request->osd_req);
	orig_request->osd_req = osd_req;
	orig_request->copyup_pages = pages;
	orig_request->copyup_page_count = page_count;

	/* Initialize the copyup op */

	osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
	osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
						false, false);

	/* Then the original write request op */

	offset = orig_request->offset;
	length = orig_request->length;
	osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
					offset, length, 0, 0);
	if (orig_request->type == OBJ_REQUEST_BIO)
		osd_req_op_extent_osd_data_bio(osd_req, 1,
					orig_request->bio_list, length);
	else
		osd_req_op_extent_osd_data_pages(osd_req, 1,
					orig_request->pages, length,
					offset & ~PAGE_MASK, false, false);

	rbd_osd_req_format_write(orig_request);

	/* All set, send it off. */

	orig_request->callback = rbd_img_obj_copyup_callback;
	osdc = &rbd_dev->rbd_client->client->osdc;
	img_result = rbd_obj_request_submit(osdc, orig_request);
	if (!img_result)
		return;
out_err:
	/* Record the error code and complete the request */

	orig_request->result = img_result;
	orig_request->xferred = 0;
	obj_request_done_set(orig_request);
	rbd_obj_request_complete(orig_request);
}
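/*
 * For reference, the copyup request assembled above carries two ops in
 * a single osd request:
 *
 *     op 0: CEPH_OSD_OP_CALL   "rbd" class, "copyup" method,
 *                              with the full parent data as input
 *     op 1: CEPH_OSD_OP_WRITE  the original write payload
 *
 * The class method is expected to populate the target object only if
 * it does not already exist, so the parent data is copied up at most
 * once even if several writes race.
 */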
/*
 * Read from the parent image the range of data that covers the
 * entire target of the given object request.  This is used for
 * satisfying a layered image write request when the target of an
 * object request from the image request does not exist.
 *
 * A page array big enough to hold the returned data is allocated
 * and supplied to rbd_img_request_fill() as the "data descriptor."
 * When the read completes, this page array will be transferred to
 * the original object request for the copyup operation.
 *
 * If an error occurs, record it as the result of the original
 * object request and mark it done so it gets completed.
 */
static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_img_request *parent_request = NULL;
	struct rbd_device *rbd_dev;
	u64 img_offset;
	u64 length;
	struct page **pages = NULL;
	u32 page_count;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request_type_valid(obj_request->type));

	img_request = obj_request->img_request;
	rbd_assert(img_request != NULL);
	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev->parent != NULL);

	/*
	 * Determine the byte range covered by the object in the
	 * child image to which the original request was to be sent.
	 */
	img_offset = obj_request->img_offset - obj_request->offset;
	length = (u64)1 << rbd_dev->header.obj_order;

	/*
	 * There is no defined parent data beyond the parent
	 * overlap, so limit what we read at that boundary if
	 * necessary.
	 */
	if (img_offset + length > rbd_dev->parent_overlap) {
		rbd_assert(img_offset < rbd_dev->parent_overlap);
		length = rbd_dev->parent_overlap - img_offset;
	}

	/*
	 * Allocate a page array big enough to receive the data read
	 * from the parent.
	 */
	page_count = (u32)calc_pages_for(0, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages)) {
		result = PTR_ERR(pages);
		pages = NULL;
		goto out_err;
	}

	result = -ENOMEM;
	parent_request = rbd_parent_request_create(obj_request,
						img_offset, length);
	if (!parent_request)
		goto out_err;

	result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
	if (result)
		goto out_err;
	parent_request->copyup_pages = pages;
	parent_request->copyup_page_count = page_count;

	parent_request->callback = rbd_img_obj_parent_read_full_callback;
	result = rbd_img_request_submit(parent_request);
	if (!result)
		return 0;

	parent_request->copyup_pages = NULL;
	parent_request->copyup_page_count = 0;
	parent_request->obj_request = NULL;
	rbd_obj_request_put(obj_request);
out_err:
	if (pages)
		ceph_release_page_vector(pages, page_count);
	if (parent_request)
		rbd_img_request_put(parent_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);

	return result;
}
static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *orig_request;
	struct rbd_device *rbd_dev;
	int result;

	rbd_assert(!obj_request_img_data_test(obj_request));

	/*
	 * All we need from the object request is the original
	 * request and the result of the STAT op.  Grab those, then
	 * we're done with the request.
	 */
	orig_request = obj_request->obj_request;
	obj_request->obj_request = NULL;
	rbd_assert(orig_request);
	rbd_assert(orig_request->img_request);

	result = obj_request->result;
	obj_request->result = 0;

	dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
		obj_request, orig_request, result,
		obj_request->xferred, obj_request->length);
	rbd_obj_request_put(obj_request);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	rbd_dev = orig_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		rbd_obj_request_put(orig_request);
		osdc = &rbd_dev->rbd_client->client->osdc;
		result = rbd_obj_request_submit(osdc, orig_request);
		if (!result)
			return;
	}

	/*
	 * Our only purpose here is to determine whether the object
	 * exists, and we don't want to treat the non-existence as
	 * an error.  If something else comes back, transfer the
	 * error to the original request and complete it now.
	 */
	if (!result) {
		obj_request_existence_set(orig_request, true);
	} else if (result == -ENOENT) {
		obj_request_existence_set(orig_request, false);
	} else if (result) {
		orig_request->result = result;
		goto out;
	}

	/*
	 * Resubmit the original request now that we have recorded
	 * whether the target object exists.
	 */
	orig_request->result = rbd_img_obj_request_submit(orig_request);
out:
	if (orig_request->result)
		rbd_obj_request_complete(orig_request);
	rbd_obj_request_put(orig_request);
}
static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *stat_request;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
	page_count = (u32)calc_pages_for(0, size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
							OBJ_REQUEST_PAGES);
	if (!stat_request)
		goto out;

	rbd_obj_request_get(obj_request);
	stat_request->obj_request = obj_request;
	stat_request->pages = pages;
	stat_request->page_count = page_count;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;
	stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
						stat_request);
	if (!stat_request->osd_req)
		goto out;
	stat_request->callback = rbd_img_obj_exists_callback;

	osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
	osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
					false, false);
	rbd_osd_req_format_read(stat_request);

	osdc = &rbd_dev->rbd_client->client->osdc;
	ret = rbd_obj_request_submit(osdc, stat_request);
out:
	if (ret)
		rbd_obj_request_put(obj_request);

	return ret;
}
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;
	bool known;

	rbd_assert(obj_request_img_data_test(obj_request));

	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_dev = img_request->rbd_dev;

	/*
	 * Only writes to layered images need special handling.
	 * Reads and non-layered writes are simple object requests.
	 * Layered writes that start beyond the end of the overlap
	 * with the parent have no parent data, so they too are
	 * simple object requests.  Finally, if the target object is
	 * known to already exist, its parent data has already been
	 * copied, so a write to the object can also be handled as a
	 * simple object request.
	 */
	if (!img_request_write_test(img_request) ||
		!img_request_layered_test(img_request) ||
		rbd_dev->parent_overlap <= obj_request->img_offset ||
		((known = obj_request_known_test(obj_request)) &&
			obj_request_exists_test(obj_request))) {

		struct rbd_device *rbd_dev;
		struct ceph_osd_client *osdc;

		rbd_dev = obj_request->img_request->rbd_dev;
		osdc = &rbd_dev->rbd_client->client->osdc;

		return rbd_obj_request_submit(osdc, obj_request);
	}

	/*
	 * It's a layered write.  The target object might exist but
	 * we may not know that yet.  If we know it doesn't exist,
	 * start by reading the data for the full target object from
	 * the parent so we can use it for a copyup to the target.
	 */
	if (known)
		return rbd_img_obj_parent_read_full(obj_request);

	/* We don't know whether the target exists.  Go find out. */

	return rbd_img_obj_exists_submit(obj_request);
}
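/*
 * Summary of the dispatch above for a single object request:
 *
 *   read, non-layered write, write beyond the parent overlap,
 *   or target known to exist       -> submit directly to the osd
 *   target known not to exist      -> rbd_img_obj_parent_read_full()
 *   target existence still unknown -> rbd_img_obj_exists_submit()
 */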
static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	dout("%s: img %p\n", __func__, img_request);
	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
		int ret;

		ret = rbd_img_obj_request_submit(obj_request);
		if (ret)
			return ret;
	}

	return 0;
}
static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_device *rbd_dev;
	u64 obj_end;
	u64 img_xferred;
	int img_result;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request and release it */

	obj_request = img_request->obj_request;
	img_xferred = img_request->xferred;
	img_result = img_request->result;
	rbd_img_request_put(img_request);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to re-submit the
	 * original request.
	 */
	rbd_assert(obj_request);
	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, obj_request);
		if (!img_result)
			return;
	}

	obj_request->result = img_result;
	if (obj_request->result)
		goto out;

	/*
	 * We need to zero anything beyond the parent overlap
	 * boundary.  Since rbd_img_obj_request_read_callback()
	 * will zero anything beyond the end of a short read, an
	 * easy way to do this is to pretend the data from the
	 * parent came up short--ending at the overlap boundary.
	 */
	rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
	obj_end = obj_request->img_offset + obj_request->length;
	if (obj_end > rbd_dev->parent_overlap) {
		u64 xferred = 0;

		if (obj_request->img_offset < rbd_dev->parent_overlap)
			xferred = rbd_dev->parent_overlap -
					obj_request->img_offset;

		obj_request->xferred = min(img_xferred, xferred);
	} else {
		obj_request->xferred = img_xferred;
	}
out:
	rbd_img_obj_request_read_callback(obj_request);
	rbd_obj_request_complete(obj_request);
}
static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request != NULL);
	rbd_assert(obj_request->result == (s32) -ENOENT);
	rbd_assert(obj_request_type_valid(obj_request->type));

	/* rbd_read_finish(obj_request, obj_request->length); */
	img_request = rbd_parent_request_create(obj_request,
						obj_request->img_offset,
						obj_request->length);
	result = -ENOMEM;
	if (!img_request)
		goto out_err;

	if (obj_request->type == OBJ_REQUEST_BIO)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
						obj_request->bio_list);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
						obj_request->pages);
	if (result)
		goto out_err;

	img_request->callback = rbd_img_parent_read_callback;
	result = rbd_img_request_submit(img_request);
	if (result)
		goto out_err;

	return;
out_err:
	if (img_request)
		rbd_img_request_put(img_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);
}
static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
							OBJ_REQUEST_NODATA);
	if (!obj_request)
		return -ENOMEM;

	ret = -ENOMEM;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
					notify_id, 0, 0);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
out:
	rbd_obj_request_put(obj_request);

	return ret;
}
static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
	struct rbd_device *rbd_dev = (struct rbd_device *)data;
	int ret;

	if (!rbd_dev)
		return;

	dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
		rbd_dev->header_name, (unsigned long long)notify_id,
		(unsigned int)opcode);
	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);

	rbd_obj_notify_ack_sync(rbd_dev, notify_id);
}
/*
 * Request sync osd watch/unwatch.  The value of "start" determines
 * whether a watch request is being initiated or torn down.
 */
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	int ret;

	rbd_assert(start ^ !!rbd_dev->watch_event);
	rbd_assert(start ^ !!rbd_dev->watch_request);

	if (start) {
		ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
						&rbd_dev->watch_event);
		if (ret < 0)
			return ret;
		rbd_assert(rbd_dev->watch_event != NULL);
	}

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
							OBJ_REQUEST_NODATA);
	if (!obj_request)
		goto out_cancel;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
	if (!obj_request->osd_req)
		goto out_cancel;

	if (start)
		ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
	else
		ceph_osdc_unregister_linger_request(osdc,
					rbd_dev->watch_request->osd_req);

	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
				rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
	rbd_osd_req_format_write(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out_cancel;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out_cancel;
	ret = obj_request->result;
	if (ret)
		goto out_cancel;

	/*
	 * A watch request is set to linger, so the underlying osd
	 * request won't go away until we unregister it.  We retain
	 * a pointer to the object request during that time (in
	 * rbd_dev->watch_request), so we'll keep a reference to
	 * it.  We'll drop that reference (below) after we've
	 * unregistered it.
	 */
	if (start) {
		rbd_dev->watch_request = obj_request;

		return 0;
	}

	/* We have successfully torn down the watch request */

	rbd_obj_request_put(rbd_dev->watch_request);
	rbd_dev->watch_request = NULL;
out_cancel:
	/* Cancel the event if we're tearing down, or on error */
	ceph_osdc_cancel_event(rbd_dev->watch_event);
	rbd_dev->watch_event = NULL;
	if (obj_request)
		rbd_obj_request_put(obj_request);

	return ret;
}
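/*
 * Minimal usage sketch (not compiled); the probe and teardown paths
 * call the helper above symmetrically.  Error handling shown here is
 * illustrative only.
 */
#if 0
	ret = rbd_dev_header_watch_sync(rbd_dev, true);  /* start watching */
	if (ret)
		return ret;
	/* header change notifications now arrive via rbd_watch_cb() */
	ret = rbd_dev_header_watch_sync(rbd_dev, false); /* tear it down */
#endif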
/*
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the outbound buffer, or a negative error code.
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			     const char *object_name,
			     const char *class_name,
			     const char *method_name,
			     const void *outbound,
			     size_t outbound_size,
			     void *inbound,
			     size_t inbound_size)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct page **pages;
	u32 page_count;
	int ret;

	/*
	 * Method calls are ultimately read operations.  The result
	 * should be placed into the inbound buffer provided.  They
	 * also supply outbound data--parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
	 */
	page_count = (u32)calc_pages_for(0, inbound_size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
					class_name, method_name);
	if (outbound_size) {
		struct ceph_pagelist *pagelist;

		pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
		if (!pagelist)
			goto out;

		ceph_pagelist_init(pagelist);
		ceph_pagelist_append(pagelist, outbound, outbound_size);
		osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
						pagelist);
	}
	osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
					obj_request->pages, inbound_size,
					0, false, false);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred < (u64)INT_MAX);
	ret = (int)obj_request->xferred;
	ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}
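/*
 * Usage sketch (not compiled): a typical caller, like
 * _rbd_dev_v2_snap_size() later in this file, supplies the class and
 * method names, an optional outbound parameter (here a snapshot id),
 * and a buffer for the method's reply.
 */
#if 0
	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_size",
				&snapid, sizeof (snapid),
				&size_buf, sizeof (size_buf));
#endif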
static void rbd_request_fn(struct request_queue *q)
		__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct rbd_device *rbd_dev = q->queuedata;
	bool read_only = rbd_dev->mapping.read_only;
	struct request *rq;
	int result;

	while ((rq = blk_fetch_request(q))) {
		bool write_request = rq_data_dir(rq) == WRITE;
		struct rbd_img_request *img_request;
		u64 offset;
		u64 length;

		/* Ignore any non-FS requests that filter through. */

		if (rq->cmd_type != REQ_TYPE_FS) {
			dout("%s: non-fs request type %d\n", __func__,
				(int) rq->cmd_type);
			__blk_end_request_all(rq, 0);
			continue;
		}

		/* Ignore/skip any zero-length requests */

		offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
		length = (u64) blk_rq_bytes(rq);

		if (!length) {
			dout("%s: zero-length request\n", __func__);
			__blk_end_request_all(rq, 0);
			continue;
		}

		spin_unlock_irq(q->queue_lock);

		/* Disallow writes to a read-only device */

		if (write_request) {
			result = -EROFS;
			if (read_only)
				goto end_request;
			rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
		}

		/*
		 * Quit early if the mapped snapshot no longer
		 * exists.  It's still possible the snapshot will
		 * have disappeared by the time our request arrives
		 * at the osd, but there's no sense in sending it if
		 * we already know.
		 */
		if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
			dout("request for non-existent snapshot");
			rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
			result = -ENXIO;
			goto end_request;
		}

		result = -EINVAL;
		if (offset && length > U64_MAX - offset + 1) {
			rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
				offset, length);
			goto end_request;	/* Shouldn't happen */
		}

		result = -EIO;
		if (offset + length > rbd_dev->mapping.size) {
			rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
				offset, length, rbd_dev->mapping.size);
			goto end_request;
		}

		result = -ENOMEM;
		img_request = rbd_img_request_create(rbd_dev, offset, length,
							write_request);
		if (!img_request)
			goto end_request;

		img_request->rq = rq;

		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
						rq->bio);
		if (!result)
			result = rbd_img_request_submit(img_request);
		if (result)
			rbd_img_request_put(img_request);
end_request:
		spin_lock_irq(q->queue_lock);
		if (result < 0) {
			rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
				write_request ? "write" : "read",
				length, offset, result);

			__blk_end_request_all(rq, result);
		}
	}
}
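/*
 * For reference: a request starting at sector 2048 carrying 8 KiB of
 * data maps to offset = 2048 << SECTOR_SHIFT = 1 MiB and length = 8192
 * bytes, which rbd_img_request_fill() then splits along object
 * boundaries as needed.
 */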
/*
 * a queue callback.  Makes sure that we don't create a bio that spans
 * across multiple osd objects.  One exception would be with a single
 * page bios, which we handle later at bio_chain_clone_range().
 */
static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
			  struct bio_vec *bvec)
{
	struct rbd_device *rbd_dev = q->queuedata;
	sector_t sector_offset;
	sector_t sectors_per_obj;
	sector_t obj_sector_offset;
	int ret;

	/*
	 * Find how far into its rbd object the partition-relative
	 * bio start sector is, relative to the enclosing device
	 * object.
	 */
	sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
	sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
	obj_sector_offset = sector_offset & (sectors_per_obj - 1);

	/*
	 * Compute the number of bytes from that offset to the end
	 * of the object.  Account for what's already used by the bio.
	 */
	ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
	if (ret > bmd->bi_size)
		ret -= bmd->bi_size;
	else
		ret = 0;

	/*
	 * Don't send back more than was asked for.  And if the bio
	 * was empty, let the whole thing through because:  "Note
	 * that a block device *must* allow a single page to be
	 * added to an empty bio."
	 */
	rbd_assert(bvec->bv_len <= PAGE_SIZE);
	if (ret > (int) bvec->bv_len || !bmd->bi_size)
		ret = (int) bvec->bv_len;

	return ret;
}
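/*
 * Worked example: with the default object order of 22 (4 MiB objects),
 * sectors_per_obj is 1 << (22 - 9) = 8192.  A bio starting 8000
 * sectors into its object may merge at most (8192 - 8000) << 9 =
 * 98304 bytes before it would cross into the next object.
 */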
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk = rbd_dev->disk;

	if (!disk)
		return;

	rbd_dev->disk = NULL;
	if (disk->flags & GENHD_FL_UP) {
		del_gendisk(disk);
		if (disk->queue)
			blk_cleanup_queue(disk->queue);
	}
	put_disk(disk);
}
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
				const char *object_name,
				u64 offset, u64 length, void *buf)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	page_count = (u32) calc_pages_for(offset, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		return ret;
	}

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, offset, length,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
					offset, length, 0, 0);
	osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
					obj_request->pages,
					obj_request->length,
					obj_request->offset & ~PAGE_MASK,
					false, false);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
	size = (size_t) obj_request->xferred;
	ceph_copy_from_page_vector(pages, buf, 0, size);
	rbd_assert(size <= (size_t)INT_MAX);
	ret = (int)size;
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}
/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return -ENOMEM;

		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
					0, size, ondisk);
		if (ret < 0)
			goto out;
		if ((size_t)ret < size) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				size, ret);
			goto out;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
	kfree(ondisk);

	return ret;
}
/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;

	snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;

	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}
static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
	sector_t size;
	bool removing;

	/*
	 * Don't hold the lock while doing disk operations,
	 * or lock ordering will conflict with the bdev mutex via:
	 * rbd_add() -> blkdev_get() -> rbd_open()
	 */
	spin_lock_irq(&rbd_dev->lock);
	removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	/*
	 * If the device is being removed, rbd_dev->disk has
	 * been destroyed, so don't try to update its size
	 */
	if (!removing) {
		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
		dout("setting size to %llu sectors", (unsigned long long)size);
		set_capacity(rbd_dev->disk, size);
		revalidate_disk(rbd_dev->disk);
	}
}
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
	u64 mapping_size;
	int ret;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	mapping_size = rbd_dev->mapping.size;
	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_header_info(rbd_dev);
	else
		ret = rbd_dev_v2_header_info(rbd_dev);

	/* If it's a mapped snapshot, validate its EXISTS flag */

	rbd_exists_validate(rbd_dev);
	mutex_unlock(&ctl_mutex);
	if (mapping_size != rbd_dev->mapping.size)
		rbd_dev_update_size(rbd_dev);

	return ret;
}
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	u64 segment_size;

	/* create gendisk info */
	disk = alloc_disk(RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
	disk->major = rbd_dev->major;
	disk->first_minor = 0;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
	if (!q)
		goto out_disk;

	/* We use the default size, but let's be explicit about it. */
	blk_queue_physical_block_size(q, SECTOR_SIZE);

	/* set io sizes to object size */
	segment_size = rbd_obj_bytes(&rbd_dev->header);
	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
	blk_queue_max_segment_size(q, segment_size);
	blk_queue_io_min(q, segment_size);
	blk_queue_io_opt(q, segment_size);

	blk_queue_merge_bvec(q, rbd_merge_bvec);
	disk->queue = q;

	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
out_disk:
	put_disk(disk);

	return -ENOMEM;
}
static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}
static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		(unsigned long long)rbd_dev->mapping.size);
}

/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
			(unsigned long long)rbd_dev->mapping.features);
}

static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
			ceph_client_id(rbd_dev->rbd_client->client));
}

static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
			(unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
}

static ssize_t rbd_image_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}

/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}
/*
 * For an rbd v2 image, shows the pool id, image id, and snapshot id
 * for the parent image.  If there is no parent, simply shows
 * "(no parent image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	struct rbd_spec *spec = rbd_dev->parent_spec;
	int count;
	char *bufp = buf;

	if (!spec)
		return sprintf(buf, "(no parent image)\n");

	count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
			(unsigned long long) spec->pool_id, spec->pool_name);
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
			spec->image_name ? spec->image_name : "(unknown)");
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
			(unsigned long long) spec->snap_id, spec->snap_name);
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
	if (count < 0)
		return count;
	bufp += count;

	return (ssize_t) (bufp - buf);
}
static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);

	return ret < 0 ? ret : size;
}
static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);

static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_client_id.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_sysfs_dev_release(struct device *dev)
{
}

static struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_sysfs_dev_release,
};
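/*
 * The attributes above appear under /sys/bus/rbd/devices/<id>/.
 * Illustrative shell usage (paths assume device id 0):
 *
 *   cat /sys/bus/rbd/devices/0/size
 *   cat /sys/bus/rbd/devices/0/current_snap
 *   echo 1 > /sys/bus/rbd/devices/0/refresh
 */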
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;
	kref_init(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
				struct rbd_spec *spec)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	rbd_dev->flags = 0;
	atomic_set(&rbd_dev->parent_ref, 0);
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->spec = spec;
	rbd_dev->rbd_client = rbdc;

	/* Initialize the layout used for all rbd requests */

	rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
	rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);

	return rbd_dev;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev);
}
/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_size",
				&snapid, sizeof (snapid),
				&size_buf, sizeof (size_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order) {
		*order = size_buf.order;
		dout("  order %u", (unsigned int)*order);
	}
	*snap_size = le64_to_cpu(size_buf.size);

	dout("  snap_id 0x%016llx snap_size = %llu\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_size);

	return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	void *p;
	int ret;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_object_prefix", NULL, 0,
				reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 incompat;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_features",
				&snapid, sizeof (snapid),
				&features_buf, sizeof (features_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	incompat = le64_to_cpu(features_buf.incompat);
	if (incompat & ~RBD_FEATURES_SUPPORTED)
		return -ENXIO;

	*snap_features = le64_to_cpu(features_buf.features);

	dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
	u64 pool_id;
	char *image_id;
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	snapid = cpu_to_le64(CEPH_NOSNAP);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_parent",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out_err;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, pool_id, out_err);
	if (pool_id == CEPH_NOPOOL) {
		/*
		 * Either the parent never existed, or we have
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 */
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			smp_mb();
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

		goto out;	/* No parent?  No problem. */
	}

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
			(unsigned long long)pool_id, U32_MAX);
		goto out_err;
	}
	parent_spec->pool_id = pool_id;

	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
	parent_spec->image_id = image_id;
	ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
	ceph_decode_64_safe(&p, end, overlap, out_err);

	if (overlap) {
		rbd_spec_put(rbd_dev->parent_spec);
		rbd_dev->parent_spec = parent_spec;
		parent_spec = NULL;	/* rbd_dev now owns this */
		rbd_dev->parent_overlap = overlap;
	} else {
		rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n");
	}
out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_stripe_unit_count", NULL, 0,
				(char *)&striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	/*
	 * We don't actually support the "fancy striping" feature
	 * (STRIPINGV2) yet, but if the striping sizes are the
	 * defaults the behavior is the same as before.  So find
	 * out, and only fail if the image has non-default values.
	 */
	obj_size = (u64)1 << rbd_dev->header.obj_order;
	p = &striping_info_buf;
	stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
				"(got %llu want %llu)",
				stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
				"(got %llu want 1)", stripe_count);
		return -EINVAL;
	}
	rbd_dev->header.stripe_unit = stripe_unit;
	rbd_dev->header.stripe_count = stripe_count;

	return 0;
}
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
				"rbd", "dir_get_name",
				image_id, image_id_size,
				reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}
static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}

	return CEPH_NOSNAP;
}
static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 snap_id;
	bool found = false;
	u32 which;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			else
				break;
		}
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}

	return found ? snap_id : CEPH_NOSNAP;
}
/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}
/*
 * When an rbd image has a parent image, it is identified by the
 * pool, image, and snapshot ids (not names).  This function fills
 * in the names for those ids.  (It's OK if we can't figure out the
 * name for an image id, but the pool and snapshot ids should always
 * exist and have names.)  All names in an rbd spec are dynamically
 * allocated.
 *
 * When an image being mapped (not a parent) is probed, we have the
 * pool name and pool id, image name and image id, and the snapshot
 * name.  The only thing we're missing is the snapshot id.
 */
static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;

	/*
	 * An image being mapped will have the pool name (etc.), but
	 * we need to look up the snapshot id.
	 */
	if (spec->pool_name) {
		if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
			u64 snap_id;

			snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
			if (snap_id == CEPH_NOSNAP)
				return -ENOENT;
			spec->snap_id = snap_id;
		} else {
			spec->snap_id = CEPH_NOSNAP;
		}

		return 0;
	}

	/* Get the pool name; we have to make our own copy of this */

	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
		rbd_warn(rbd_dev, "unable to get image name");

	/* Look up the snapshot name, and make a copy */

	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;
out_err:
	kfree(image_name);
	kfree(pool_name);

	return ret;
}
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;
	int ret;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapcontext", NULL, 0,
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64))
		goto out;
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	ceph_put_snap_context(rbd_dev->header.snapc);
	rbd_dev->header.snapc = snapc;

	dout("  snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);

	return ret;
}
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf;
	__le64 snapid;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapshot_name",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout("  snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
	bool first_time = rbd_dev->header.object_prefix == NULL;
	int ret;

	down_write(&rbd_dev->header_rwsem);

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		goto out;

	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
			goto out;
	}

	/*
	 * If the image supports layering, get the parent info.  We
	 * need to probe the first time regardless.  Thereafter we
	 * only need to if there's a parent, to see if it has
	 * disappeared due to the mapped image getting flattened.
	 */
	if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
			(first_time || rbd_dev->parent_spec)) {
		bool warn;

		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out;

		/*
		 * Print a warning if this is the initial probe and
		 * the image has a parent.  Don't print it if the
		 * image now being probed is itself a parent.  We
		 * can tell at this point because we won't know its
		 * pool name yet (just its pool id).
		 */
		warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
		if (first_time && warn)
			rbd_warn(rbd_dev, "WARNING: kernel layering "
					"is EXPERIMENTAL!");
	}

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
		if (rbd_dev->mapping.size != rbd_dev->header.image_size)
			rbd_dev->mapping.size = rbd_dev->header.image_size;

	ret = rbd_dev_v2_snap_context(rbd_dev);
	dout("rbd_dev_v2_snap_context returned %d\n", ret);
out:
	up_write(&rbd_dev->header_rwsem);

	return ret;
}
static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
	struct device *dev;
	int ret;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	dev = &rbd_dev->dev;
	dev->bus = &rbd_bus_type;
	dev->type = &rbd_device_type;
	dev->parent = &rbd_root_dev;
	dev->release = rbd_dev_device_release;
	dev_set_name(dev, "%d", rbd_dev->dev_id);
	ret = device_register(dev);

	mutex_unlock(&ctl_mutex);

	return ret;
}

static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
{
	device_unregister(&rbd_dev->dev);
}
static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);

/*
 * Get a unique rbd identifier for the given new rbd_dev, and add
 * the rbd_dev to the global list.  The minimum rbd id is 1.
 */
static void rbd_dev_id_get(struct rbd_device *rbd_dev)
{
	rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);
	dout("rbd_dev %p given dev id %llu\n", rbd_dev,
		(unsigned long long) rbd_dev->dev_id);
}
/*
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
 */
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
	struct list_head *tmp;
	int rbd_id = rbd_dev->dev_id;
	int max_id;

	rbd_assert(rbd_id > 0);

	dout("rbd_dev %p released dev id %llu\n", rbd_dev,
		(unsigned long long) rbd_dev->dev_id);
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);

	/*
	 * If the id being "put" is not the current maximum, there
	 * is nothing special we need to do.
	 */
	if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
		spin_unlock(&rbd_dev_list_lock);
		return;
	}

	/*
	 * We need to update the current maximum id.  Search the
	 * list to find out what it is.  We're more likely to find
	 * the maximum at the end, so search the list backward.
	 */
	max_id = 0;
	list_for_each_prev(tmp, &rbd_dev_list) {
		struct rbd_device *rbd_dev;

		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id > max_id)
			max_id = rbd_dev->dev_id;
	}
	spin_unlock(&rbd_dev_list_lock);

	/*
	 * The max id could have been updated by rbd_dev_id_get(), in
	 * which case it now accurately reflects the new maximum.
	 * Be careful not to overwrite the maximum value in that
	 * case.
	 */
	atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
	dout("  max dev id has been reset\n");
}
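/*
 * Example of the race the cmpxchg above tolerates: if id 3 is being
 * put while rbd_dev_id_get() concurrently hands out id 4, then
 * rbd_dev_id_max is already 4, the compare (expecting 3) fails, and
 * the newer maximum is left intact.
 */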
/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}
/*
 * Finds the next token in *buf, and if the provided token buffer is
 * big enough, copies the found token into it.  The result, if
 * copied, is guaranteed to be terminated with '\0'.  Note that *buf
 * must be terminated with '\0' on entry.
 *
 * Returns the length of the token found (not including the '\0').
 * Return value will be 0 if no token is found, and it will be >=
 * token_size if the token would not fit.
 *
 * The *buf pointer will be updated to point beyond the end of the
 * found token.  Note that this occurs even if the token buffer is
 * too small to hold it.
 */
static inline size_t copy_token(const char **buf,
				char *token,
				size_t token_size)
{
	size_t len;

	len = next_token(buf);
	if (len < token_size) {
		memcpy(token, *buf, len);
		*(token + len) = '\0';
	}
	*buf += len;

	return len;
}
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}
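/*
 * Illustrative sketch: continuing the next_token() example above,
 *
 *	char *tok = dup_token(&p, NULL);
 *
 * returns a kmalloc'd, NUL-terminated copy "rbd" and leaves p
 * pointing at " foo".  A zero-length token still yields a valid
 * one-byte duplicate containing only '\0'; NULL is returned only
 * on allocation failure.
 */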
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}
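/*
 * Example (hypothetical cluster details): the buffer written to
 * /sys/bus/rbd/add might look like
 *
 *	1.2.3.4:6789,1.2.3.5:6789 name=admin,secret=... rbd myimage mysnap
 *
 * which parses as two monitor addresses, a ceph/rbd option string,
 * pool "rbd", image "myimage" and the optional snapshot "mysnap".
 * Omitting the final token maps the image head (recorded as "-").
 */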
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret > sizeof (__le32)) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
		if (!ret)
			rbd_dev->image_format = 2;
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}
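/*
 * Example (hypothetical id value): for an image named "myimage",
 * the id object is named by prepending RBD_ID_PREFIX, giving
 * "rbd_id.myimage".  If the "get_id" class method returns -ENOENT
 * the image is treated as format 1 (empty image id); otherwise the
 * decoded string, e.g. "1014b76b8b4567", becomes
 * rbd_dev->spec->image_id and the image is format 2.
 */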
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * routines.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	/* Drop parent reference unless it's already been done (or none) */

	if (rbd_dev->parent_overlap)
		rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent, false);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);

	return 0;
out_err:
	if (parent) {
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}

	return ret;
}
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* generate unique id: find highest unique id, add one */
	rbd_dev_id_get(rbd_dev);

	/* Fill in the device name, now that we have its id. */
	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Get our block major device number. */

	ret = register_blkdev(0, rbd_dev->name);
	if (ret < 0)
		goto err_out_id;
	rbd_dev->major = ret;

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;
	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	return ret;
}
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);

	return 0;
}
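/*
 * Example (hypothetical names): a format 1 image "myimage" gets
 * header object "myimage.rbd" (RBD_SUFFIX), while a format 2 image
 * with id "1014b76b8b4567" gets header object
 * "rbd_header.1014b76b8b4567" (RBD_HEADER_PREFIX).
 */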
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;
	int tmp;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev, true);
		if (ret)
			goto out_header_name;
	}

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_header_info(rbd_dev);
	else
		ret = rbd_dev_v2_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_probe;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);

	return 0;
err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping) {
		tmp = rbd_dev_header_watch_sync(rbd_dev, false);
		if (tmp)
			rbd_warn(rbd_dev, "unable to tear down "
					"watch request (%d)\n", tmp);
	}
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);

	return ret;
}
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}
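/*
 * Usage sketch (hypothetical cluster details):
 *
 *	# echo "1.2.3.4:6789 name=admin rbd myimage" > /sys/bus/rbd/add
 *
 * On success this allocates the next dev id, registers the block
 * device and announces the disk, after which the mapped image
 * appears as /dev/rbd<id> (e.g. /dev/rbd0).
 */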
static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev->major = 0;
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
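/*
 * Worked example: for a layering chain a -> b -> c (c is b's
 * parent), the inner loop walks to the deepest pair (first = b,
 * second = c), releases c and clears b's parent fields; the outer
 * loop then repeats with first = a, second = b, until a itself has
 * no parent left.
 */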
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	unsigned long ul;
	bool already = false;
	int ret;

	ret = strict_strtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		goto done;

	ret = rbd_dev_header_watch_sync(rbd_dev, false);
	if (ret)
		rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);

	/*
	 * flush remaining watch callbacks - these must be complete
	 * before the osd_client is shutdown
	 */
	dout("%s: flushing notifies", __func__);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed.  Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);
	ret = count;
done:
	mutex_unlock(&ctl_mutex);

	return ret;
}
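/*
 * Usage sketch: writing a mapped device's id to the bus control
 * file tears the mapping down, e.g.
 *
 *	# echo 0 > /sys/bus/rbd/remove
 *
 * This fails with -EBUSY while the device is held open, and with
 * -ENOENT if no device with that id exists.
 */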
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}
static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}
	rc = rbd_slab_init();
	if (rc)
		return rc;
	rc = rbd_sysfs_init();
	if (rc)
		rbd_slab_exit();
	else
		pr_info("loaded " RBD_DRV_NAME_LONG "\n");

	return rc;
}

static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");