/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT	9
#define SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
        unsigned int counter;

        counter = (unsigned int)__atomic_add_unless(v, 1, 0);
        if (counter <= (unsigned int)INT_MAX)
                return (int)counter;

        atomic_dec(v);

        return -EINVAL;
}
/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
        int counter;

        counter = atomic_dec_return(v);
        if (counter >= 0)
                return counter;

        atomic_inc(v);

        return -EINVAL;
}
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
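/*
 * Worked example (editor's note): for a 4-byte int,
 * MAX_INT_FORMAT_WIDTH is (5 * 4) / 2 + 1 = 11 characters, enough
 * for "-2147483648" (11 characters), the widest decimal rendering
 * of a 32-bit int.  The 5/2 factor slightly overestimates
 * log10(256) ~= 2.41 digits per byte, so the bound also holds for
 * larger int sizes.
 */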
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
        /* These six fields never change for a given rbd image */
        char *object_prefix;
        __u8 obj_order;
        __u8 crypt_type;
        __u8 comp_type;
        u64 stripe_unit;
        u64 stripe_count;
        u64 features;           /* Might be changeable someday? */

        /* The remaining fields need to be updated occasionally */
        u64 image_size;
        struct ceph_snap_context *snapc;
        char *snap_names;       /* format 1 only */
        u64 *snap_sizes;        /* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
        u64             pool_id;
        const char      *pool_name;

        const char      *image_id;
        const char      *image_name;

        u64             snap_id;
        const char      *snap_name;

        struct kref     kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
        struct ceph_client      *client;
        struct kref             kref;
        struct list_head        node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
        OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
        OBJ_REQ_DONE,           /* completion flag: not done = 0, done = 1 */
        OBJ_REQ_IMG_DATA,       /* object usage: standalone = 0, image = 1 */
        OBJ_REQ_KNOWN,          /* EXISTS flag valid: no = 0, yes = 1 */
        OBJ_REQ_EXISTS,         /* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
        const char              *object_name;
        u64                     offset;         /* object start byte */
        u64                     length;         /* bytes from offset */
        unsigned long           flags;

        /*
         * An object request associated with an image will have its
         * img_data flag set; a standalone object request will not.
         *
         * A standalone object request will have which == BAD_WHICH
         * and a null obj_request pointer.
         *
         * An object request initiated in support of a layered image
         * object (to check for its existence before a write) will
         * have which == BAD_WHICH and a non-null obj_request pointer.
         *
         * Finally, an object request for rbd image data will have
         * which != BAD_WHICH, and will have a non-null img_request
         * pointer.  The value of which will be in the range
         * 0..(img_request->obj_request_count-1).
         */
        union {
                struct rbd_obj_request  *obj_request;   /* STAT op */
                struct {
                        struct rbd_img_request  *img_request;
                        u64                     img_offset;
                        /* links for img_request->obj_requests list */
                        struct list_head        links;
                };
        };
        u32                     which;          /* posn in image request list */

        enum obj_request_type   type;
        union {
                struct bio      *bio_list;
                struct {
                        struct page     **pages;
                        u32             page_count;
                };
        };
        struct page             **copyup_pages;
        u32                     copyup_page_count;

        struct ceph_osd_request *osd_req;

        u64                     xferred;        /* bytes transferred */
        int                     result;

        rbd_obj_callback_t      callback;
        struct completion       completion;

        struct kref             kref;
};
enum img_req_flags {
        IMG_REQ_WRITE,          /* I/O direction: read = 0, write = 1 */
        IMG_REQ_CHILD,          /* initiator: block = 0, child image = 1 */
        IMG_REQ_LAYERED,        /* ENOENT handling: normal = 0, layered = 1 */
};
struct rbd_img_request {
        struct rbd_device       *rbd_dev;
        u64                     offset; /* starting image byte offset */
        u64                     length; /* byte count from offset */
        unsigned long           flags;
        union {
                u64                     snap_id;        /* for reads */
                struct ceph_snap_context *snapc;        /* for writes */
        };
        union {
                struct request          *rq;            /* block request */
                struct rbd_obj_request  *obj_request;   /* obj req initiator */
        };
        struct page             **copyup_pages;
        u32                     copyup_page_count;
        spinlock_t              completion_lock;/* protects next_completion */
        u32                     next_completion;
        rbd_img_callback_t      callback;
        u64                     xferred;/* aggregate bytes transferred */
        int                     result; /* first nonzero obj_request result */

        u32                     obj_request_count;
        struct list_head        obj_requests;   /* rbd_obj_request structs */

        struct kref             kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
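/*
 * Illustrative usage (editor's sketch, mirroring callers later in
 * this file): walking an image request's object requests with the
 * helper macros.  The _safe variant iterates in reverse and
 * tolerates deletion of the current entry, which is why teardown
 * paths use it:
 *
 *	struct rbd_obj_request *obj_request;
 *	struct rbd_obj_request *next_obj_request;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		dout("obj %p which %u\n", obj_request, obj_request->which);
 *
 *	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
 *		rbd_img_obj_request_del(img_request, obj_request);
 */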
struct rbd_mapping {
        u64                     size;
        u64                     features;
        bool                    read_only;
};

/*
 * a single device
 */
struct rbd_device {
        int                     dev_id;         /* blkdev unique id */

        int                     major;          /* blkdev assigned major */
        struct gendisk          *disk;          /* blkdev's gendisk and rq */

        u32                     image_format;   /* Either 1 or 2 */
        struct rbd_client       *rbd_client;

        char                    name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

        spinlock_t              lock;           /* queue, flags, open_count */

        struct rbd_image_header header;
        unsigned long           flags;          /* possibly lock protected */
        struct rbd_spec         *spec;

        char                    *header_name;

        struct ceph_file_layout layout;

        struct ceph_osd_event   *watch_event;
        struct rbd_obj_request  *watch_request;

        struct rbd_spec         *parent_spec;
        u64                     parent_overlap;
        atomic_t                parent_ref;
        struct rbd_device       *parent;

        /* protects updating the header */
        struct rw_semaphore     header_rwsem;

        struct rbd_mapping      mapping;

        struct list_head        node;

        /* sysfs related */
        struct device           dev;
        unsigned long           open_count;     /* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
        RBD_DEV_FLAG_EXISTS,    /* mapped snapshot has not been deleted */
        RBD_DEV_FLAG_REMOVING,  /* this mapping is being removed */
};
static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);
/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
                       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
                          size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);
static struct bus_attribute rbd_bus_attrs[] = {
        __ATTR(add, S_IWUSR, NULL, rbd_add),
        __ATTR(remove, S_IWUSR, NULL, rbd_remove),
        __ATTR_NULL
};

static struct bus_type rbd_bus_type = {
        .name		= "rbd",
        .bus_attrs	= rbd_bus_attrs,
};
static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
        .init_name =    "rbd",
        .release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;

        if (!rbd_dev)
                printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
        else if (rbd_dev->disk)
                printk(KERN_WARNING "%s: %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_name)
                printk(KERN_WARNING "%s: image %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_id)
                printk(KERN_WARNING "%s: id %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
        else
                printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
                        RBD_DRV_NAME, rbd_dev, &vaf);
        va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
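/*
 * Editor's note: rbd_assert() is used like a statement, so a typical
 * call reads like this and compiles away entirely when RBD_DEBUG is
 * undefined:
 *
 *	rbd_assert(which != BAD_WHICH);
 *
 * With RBD_DEBUG defined, a failing expression prints the function,
 * line, and the asserted text, then BUG()s.
 */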
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
        struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
        bool removing = false;

        if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
                return -EROFS;

        spin_lock_irq(&rbd_dev->lock);
        if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
                removing = true;
        else
                rbd_dev->open_count++;
        spin_unlock_irq(&rbd_dev->lock);
        if (removing)
                return -ENOENT;

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
        (void) get_device(&rbd_dev->dev);
        set_device_ro(bdev, rbd_dev->mapping.read_only);
        mutex_unlock(&ctl_mutex);

        return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
        struct rbd_device *rbd_dev = disk->private_data;
        unsigned long open_count_before;

        spin_lock_irq(&rbd_dev->lock);
        open_count_before = rbd_dev->open_count--;
        spin_unlock_irq(&rbd_dev->lock);
        rbd_assert(open_count_before > 0);

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
        put_device(&rbd_dev->dev);
        mutex_unlock(&ctl_mutex);
}
static const struct block_device_operations rbd_bd_ops = {
        .owner			= THIS_MODULE,
        .open			= rbd_open,
        .release		= rbd_release,
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;
        int ret = -ENOMEM;

        dout("%s:\n", __func__);
        rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
        if (!rbdc)
                goto out_opt;

        kref_init(&rbdc->kref);
        INIT_LIST_HEAD(&rbdc->node);

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

        rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
        if (IS_ERR(rbdc->client))
                goto out_mutex;
        ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

        ret = ceph_open_session(rbdc->client);
        if (ret < 0)
                goto out_err;

        spin_lock(&rbd_client_list_lock);
        list_add_tail(&rbdc->node, &rbd_client_list);
        spin_unlock(&rbd_client_list_lock);

        mutex_unlock(&ctl_mutex);
        dout("%s: rbdc %p\n", __func__, rbdc);

        return rbdc;

out_err:
        ceph_destroy_client(rbdc->client);
out_mutex:
        mutex_unlock(&ctl_mutex);
        kfree(rbdc);
out_opt:
        if (ceph_opts)
                ceph_destroy_options(ceph_opts);
        dout("%s: error %d\n", __func__, ret);

        return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
        kref_get(&rbdc->kref);

        return rbdc;
}
/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
        struct rbd_client *client_node;
        bool found = false;

        if (ceph_opts->flags & CEPH_OPT_NOSHARE)
                return NULL;

        spin_lock(&rbd_client_list_lock);
        list_for_each_entry(client_node, &rbd_client_list, node) {
                if (!ceph_compare_options(ceph_opts, client_node->client)) {
                        __rbd_get_client(client_node);

                        found = true;
                        break;
                }
        }
        spin_unlock(&rbd_client_list_lock);

        return found ? client_node : NULL;
}
/*
 * mount options
 */
enum {
        Opt_last_int,
        /* int args above */
        Opt_last_string,
        /* string args above */
        Opt_read_only,
        Opt_read_write,
        /* Boolean args above */
        Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
        /* int args above */
        /* string args above */
        {Opt_read_only, "read_only"},
        {Opt_read_only, "ro"},          /* Alternate spelling */
        {Opt_read_write, "read_write"},
        {Opt_read_write, "rw"},         /* Alternate spelling */
        /* Boolean args above */
        {-1 /* Opt_last_bool */, NULL}
};

struct rbd_options {
        bool    read_only;
};

#define RBD_READ_ONLY_DEFAULT	false
static int parse_rbd_opts_token(char *c, void *private)
{
        struct rbd_options *rbd_opts = private;
        substring_t argstr[MAX_OPT_ARGS];
        int token, intval, ret;

        token = match_token(c, rbd_opts_tokens, argstr);
        if (token < 0)
                return -EINVAL;

        if (token < Opt_last_int) {
                ret = match_int(&argstr[0], &intval);
                if (ret < 0) {
                        pr_err("bad mount option arg (not int) "
                               "at '%s'\n", c);
                        return ret;
                }
                dout("got int token %d val %d\n", token, intval);
        } else if (token > Opt_last_int && token < Opt_last_string) {
                dout("got string token %d val %s\n", token,
                     argstr[0].from);
        } else if (token > Opt_last_string && token < Opt_last_bool) {
                dout("got Boolean token %d\n", token);
        } else {
                dout("got token %d\n", token);
        }

        switch (token) {
        case Opt_read_only:
                rbd_opts->read_only = true;
                break;
        case Opt_read_write:
                rbd_opts->read_only = false;
                break;
        default:
                rbd_assert(false);
                break;
        }
        return 0;
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;

        rbdc = rbd_client_find(ceph_opts);
        if (rbdc)       /* using an existing client */
                ceph_destroy_options(ceph_opts);
        else
                rbdc = rbd_client_create(ceph_opts);

        return rbdc;
}
/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
        struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

        dout("%s: rbdc %p\n", __func__, rbdc);
        spin_lock(&rbd_client_list_lock);
        list_del(&rbdc->node);
        spin_unlock(&rbd_client_list_lock);

        ceph_destroy_client(rbdc->client);
        kfree(rbdc);
}
/*
 * Drop reference to ceph client node. If it's not referenced anymore,
 * release it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
        if (rbdc)
                kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
        return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
        size_t size;
        u32 snap_count;

        /* The header has to start with the magic rbd header text */
        if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
                return false;

        /* The bio layer requires at least sector-sized I/O */

        if (ondisk->options.order < SECTOR_SHIFT)
                return false;

        /* If we use u64 in a few spots we may be able to loosen this */

        if (ondisk->options.order > 8 * sizeof (int) - 1)
                return false;

        /*
         * The size of a snapshot header has to fit in a size_t, and
         * that limits the number of snapshots.
         */
        snap_count = le32_to_cpu(ondisk->snap_count);
        size = SIZE_MAX - sizeof (struct ceph_snap_context);
        if (snap_count > size / sizeof (__le64))
                return false;

        /*
         * Not only that, but the size of the entire snapshot
         * header must also be representable in a size_t.
         */
        size -= snap_count * sizeof (__le64);
        if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
                return false;

        return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
                                 struct rbd_image_header_ondisk *ondisk)
{
        struct rbd_image_header *header = &rbd_dev->header;
        bool first_time = header->object_prefix == NULL;
        struct ceph_snap_context *snapc;
        char *object_prefix = NULL;
        char *snap_names = NULL;
        u64 *snap_sizes = NULL;
        u32 snap_count;
        size_t size;
        int ret = -ENOMEM;
        u32 i;

        /* Allocate this now to avoid having to handle failure below */

        if (first_time) {
                size_t len;

                len = strnlen(ondisk->object_prefix,
                                sizeof (ondisk->object_prefix));
                object_prefix = kmalloc(len + 1, GFP_KERNEL);
                if (!object_prefix)
                        return -ENOMEM;
                memcpy(object_prefix, ondisk->object_prefix, len);
                object_prefix[len] = '\0';
        }

        /* Allocate the snapshot context and fill it in */

        snap_count = le32_to_cpu(ondisk->snap_count);
        snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
        if (!snapc)
                goto out_err;
        snapc->seq = le64_to_cpu(ondisk->snap_seq);
        if (snap_count) {
                struct rbd_image_snap_ondisk *snaps;
                u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

                /* We'll keep a copy of the snapshot names... */

                if (snap_names_len > (u64)SIZE_MAX)
                        goto out_2big;
                snap_names = kmalloc(snap_names_len, GFP_KERNEL);
                if (!snap_names)
                        goto out_err;

                /* ...as well as the array of their sizes. */

                size = snap_count * sizeof (*header->snap_sizes);
                snap_sizes = kmalloc(size, GFP_KERNEL);
                if (!snap_sizes)
                        goto out_err;

                /*
                 * Copy the names, and fill in each snapshot's id
                 * and size.
                 *
                 * Note that rbd_dev_v1_header_info() guarantees the
                 * ondisk buffer we're working with has
                 * snap_names_len bytes beyond the end of the
                 * snapshot id array, so this memcpy() is safe.
                 */
                memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
                snaps = ondisk->snaps;
                for (i = 0; i < snap_count; i++) {
                        snapc->snaps[i] = le64_to_cpu(snaps[i].id);
                        snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
                }
        }

        /* We won't fail any more, fill in the header */

        down_write(&rbd_dev->header_rwsem);
        if (first_time) {
                header->object_prefix = object_prefix;
                header->obj_order = ondisk->options.order;
                header->crypt_type = ondisk->options.crypt_type;
                header->comp_type = ondisk->options.comp_type;
                /* The rest aren't used for format 1 images */
                header->stripe_unit = 0;
                header->stripe_count = 0;
                header->features = 0;
        } else {
                ceph_put_snap_context(header->snapc);
                kfree(header->snap_names);
                kfree(header->snap_sizes);
        }

        /* The remaining fields always get updated (when we refresh) */

        header->image_size = le64_to_cpu(ondisk->image_size);
        header->snapc = snapc;
        header->snap_names = snap_names;
        header->snap_sizes = snap_sizes;

        /* Make sure mapping size is consistent with header info */

        if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
                if (rbd_dev->mapping.size != header->image_size)
                        rbd_dev->mapping.size = header->image_size;

        up_write(&rbd_dev->header_rwsem);

        return 0;
out_2big:
        ret = -EIO;
out_err:
        kfree(snap_sizes);
        kfree(snap_names);
        ceph_put_snap_context(snapc);
        kfree(object_prefix);

        return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
        const char *snap_name;

        rbd_assert(which < rbd_dev->header.snapc->num_snaps);

        /* Skip over names until we find the one we are looking for */

        snap_name = rbd_dev->header.snap_names;
        while (which--)
                snap_name += strlen(snap_name) + 1;

        return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
        u64 snap_id1 = *(u64 *)s1;
        u64 snap_id2 = *(u64 *)s2;

        if (snap_id1 < snap_id2)
                return 1;
        return snap_id1 == snap_id2 ? 0 : -1;
}
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
        struct ceph_snap_context *snapc = rbd_dev->header.snapc;
        u64 *found;

        found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
                                sizeof (snap_id), snapid_compare_reverse);

        return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id)
{
        u32 which;
        const char *snap_name;

        which = rbd_dev_snap_index(rbd_dev, snap_id);
        if (which == BAD_SNAP_INDEX)
                return ERR_PTR(-ENOENT);

        snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
        return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
        if (snap_id == CEPH_NOSNAP)
                return RBD_SNAP_HEAD_NAME;

        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (rbd_dev->image_format == 1)
                return rbd_dev_v1_snap_name(rbd_dev, snap_id);

        return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                                u64 *snap_size)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (snap_id == CEPH_NOSNAP) {
                *snap_size = rbd_dev->header.image_size;
        } else if (rbd_dev->image_format == 1) {
                u32 which;

                which = rbd_dev_snap_index(rbd_dev, snap_id);
                if (which == BAD_SNAP_INDEX)
                        return -ENOENT;

                *snap_size = rbd_dev->header.snap_sizes[which];
        } else {
                u64 size = 0;
                int ret;

                ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
                if (ret)
                        return ret;

                *snap_size = size;
        }
        return 0;
}
static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                        u64 *snap_features)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (snap_id == CEPH_NOSNAP) {
                *snap_features = rbd_dev->header.features;
        } else if (rbd_dev->image_format == 1) {
                *snap_features = 0;     /* No features for format 1 */
        } else {
                u64 features = 0;
                int ret;

                ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
                if (ret)
                        return ret;

                *snap_features = features;
        }
        return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
        u64 snap_id = rbd_dev->spec->snap_id;
        u64 size = 0;
        u64 features = 0;
        int ret;

        ret = rbd_snap_size(rbd_dev, snap_id, &size);
        if (ret)
                return ret;
        ret = rbd_snap_features(rbd_dev, snap_id, &features);
        if (ret)
                return ret;

        rbd_dev->mapping.size = size;
        rbd_dev->mapping.features = features;

        return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
        rbd_dev->mapping.size = 0;
        rbd_dev->mapping.features = 0;
}
static void rbd_segment_name_free(const char *name)
{
        /* The explicit cast here is needed to drop the const qualifier */
        kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
        char *name;
        u64 segment;
        int ret;
        char *name_format;

        name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
        if (!name)
                return NULL;
        segment = offset >> rbd_dev->header.obj_order;
        name_format = "%s.%012llx";
        if (rbd_dev->image_format == 2)
                name_format = "%s.%016llx";
        ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
                        rbd_dev->header.object_prefix, segment);
        if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
                pr_err("error formatting segment name for #%llu (%d)\n",
                        segment, ret);
                /*
                 * The name came from the segment-name slab cache, so
                 * it must go back via kmem_cache_free(); a plain
                 * kfree() here would be a bug.  rbd_segment_name_free()
                 * is defined above for exactly this reason.
                 */
                rbd_segment_name_free(name);
                name = NULL;
        }

        return name;
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
        u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

        return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
                                u64 offset, u64 length)
{
        u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

        offset &= segment_size - 1;

        rbd_assert(length <= U64_MAX - offset);
        if (offset + length > segment_size)
                length = segment_size - offset;

        return length;
}
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
        return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
        struct bio *tmp;

        while (chain) {
                tmp = chain;
                chain = chain->bi_next;
                bio_put(tmp);
        }
}
/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
        struct bio_vec *bv;
        unsigned long flags;
        void *buf;
        int i;
        int pos = 0;

        while (chain) {
                bio_for_each_segment(bv, chain, i) {
                        if (pos + bv->bv_len > start_ofs) {
                                int remainder = max(start_ofs - pos, 0);
                                buf = bvec_kmap_irq(bv, &flags);
                                memset(buf + remainder, 0,
                                       bv->bv_len - remainder);
                                flush_dcache_page(bv->bv_page);
                                bvec_kunmap_irq(buf, &flags);
                        }
                        pos += bv->bv_len;
                }

                chain = chain->bi_next;
        }
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
        struct page **page = &pages[offset >> PAGE_SHIFT];

        rbd_assert(end > offset);
        rbd_assert(end - offset <= (u64)SIZE_MAX);
        while (offset < end) {
                size_t page_offset;
                size_t length;
                unsigned long flags;
                void *kaddr;

                page_offset = (size_t)(offset & ~PAGE_MASK);
                length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
                local_irq_save(flags);
                kaddr = kmap_atomic(*page);
                memset(kaddr + page_offset, 0, length);
                flush_dcache_page(*page);
                kunmap_atomic(kaddr);
                local_irq_restore(flags);

                offset += length;
                page++;
        }
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
                                        unsigned int offset,
                                        unsigned int len,
                                        gfp_t gfpmask)
{
        struct bio_vec *bv;
        unsigned int resid;
        unsigned short idx;
        unsigned int voff;              /* offset into first segment */
        unsigned short end_idx;
        unsigned short vcnt;
        struct bio *bio;

        /* Handle the easy case for the caller */

        if (!offset && len == bio_src->bi_size)
                return bio_clone(bio_src, gfpmask);

        if (WARN_ON_ONCE(!len))
                return NULL;
        if (WARN_ON_ONCE(len > bio_src->bi_size))
                return NULL;
        if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
                return NULL;

        /* Find first affected segment... */

        resid = offset;
        bio_for_each_segment(bv, bio_src, idx) {
                if (resid < bv->bv_len)
                        break;
                resid -= bv->bv_len;
        }
        voff = resid;

        /* ...and the last affected segment */

        resid += len;
        __bio_for_each_segment(bv, bio_src, end_idx, idx) {
                if (resid <= bv->bv_len)
                        break;
                resid -= bv->bv_len;
        }
        vcnt = end_idx - idx + 1;

        /* Build the clone */

        bio = bio_alloc(gfpmask, (unsigned int) vcnt);
        if (!bio)
                return NULL;    /* ENOMEM */

        bio->bi_bdev = bio_src->bi_bdev;
        bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
        bio->bi_rw = bio_src->bi_rw;
        bio->bi_flags |= 1 << BIO_CLONED;

        /*
         * Copy over our part of the bio_vec, then update the first
         * and last (or only) entries.
         */
        memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
                        vcnt * sizeof (struct bio_vec));
        bio->bi_io_vec[0].bv_offset += voff;
        if (vcnt > 1) {
                bio->bi_io_vec[0].bv_len -= voff;
                bio->bi_io_vec[vcnt - 1].bv_len = resid;
        } else {
                bio->bi_io_vec[0].bv_len = len;
        }

        bio->bi_vcnt = vcnt;
        bio->bi_size = len;
        bio->bi_idx = 0;

        return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
                                        unsigned int *offset,
                                        unsigned int len,
                                        gfp_t gfpmask)
{
        struct bio *bi = *bio_src;
        unsigned int off = *offset;
        struct bio *chain = NULL;
        struct bio **end;

        /* Build up a chain of clone bios up to the limit */

        if (!bi || off >= bi->bi_size || !len)
                return NULL;            /* Nothing to clone */

        end = &chain;
        while (len) {
                unsigned int bi_size;
                struct bio *bio;

                if (!bi) {
                        rbd_warn(NULL, "bio_chain exhausted with %u left", len);
                        goto out_err;   /* EINVAL; ran out of bio's */
                }
                bi_size = min_t(unsigned int, bi->bi_size - off, len);
                bio = bio_clone_range(bi, off, bi_size, gfpmask);
                if (!bio)
                        goto out_err;   /* ENOMEM */

                *end = bio;
                end = &bio->bi_next;

                off += bi_size;
                if (off == bi->bi_size) {
                        bi = bi->bi_next;
                        off = 0;
                }
                len -= bi_size;
        }
        *bio_src = bi;
        *offset = off;

        return chain;
out_err:
        bio_chain_put(chain);

        return NULL;
}
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
        if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
                struct rbd_device *rbd_dev;

                rbd_dev = obj_request->img_request->rbd_dev;
                rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
                        obj_request);
        }
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
        if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
                struct rbd_device *rbd_dev = NULL;

                if (obj_request_img_data_test(obj_request))
                        rbd_dev = obj_request->img_request->rbd_dev;
                rbd_warn(rbd_dev, "obj_request %p already marked done\n",
                        obj_request);
        }
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
                                bool exists)
{
        if (exists)
                set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
        set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
        smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
        struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

        return obj_request->img_offset <
            round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
                atomic_read(&obj_request->kref.refcount));
        kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request != NULL);
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
                atomic_read(&obj_request->kref.refcount));
        kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
        dout("%s: img %p (was %d)\n", __func__, img_request,
             atomic_read(&img_request->kref.refcount));
        kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
        rbd_assert(img_request != NULL);
        dout("%s: img %p (was %d)\n", __func__, img_request,
                atomic_read(&img_request->kref.refcount));
        if (img_request_child_test(img_request))
                kref_put(&img_request->kref, rbd_parent_request_destroy);
        else
                kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
                                        struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request->img_request == NULL);

        /* Image request now owns object's original reference */
        obj_request->img_request = img_request;
        obj_request->which = img_request->obj_request_count;
        rbd_assert(!obj_request_img_data_test(obj_request));
        obj_request_img_data_set(obj_request);
        rbd_assert(obj_request->which != BAD_WHICH);
        img_request->obj_request_count++;
        list_add_tail(&obj_request->links, &img_request->obj_requests);
        dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
                obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
                                        struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request->which != BAD_WHICH);

        dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
                obj_request->which);
        list_del(&obj_request->links);
        rbd_assert(img_request->obj_request_count > 0);
        img_request->obj_request_count--;
        rbd_assert(obj_request->which == img_request->obj_request_count);
        obj_request->which = BAD_WHICH;
        rbd_assert(obj_request_img_data_test(obj_request));
        rbd_assert(obj_request->img_request == img_request);
        obj_request->img_request = NULL;
        obj_request->callback = NULL;
        rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
        switch (type) {
        case OBJ_REQUEST_NODATA:
        case OBJ_REQUEST_BIO:
        case OBJ_REQUEST_PAGES:
                return true;
        default:
                return false;
        }
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
                                struct rbd_obj_request *obj_request)
{
        dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

        return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

        dout("%s: img %p\n", __func__, img_request);

        /*
         * If no error occurred, compute the aggregate transfer
         * count for the image request.  We could instead use
         * atomic64_cmpxchg() to update it as each object request
         * completes; not clear which way is better off hand.
         */
        if (!img_request->result) {
                struct rbd_obj_request *obj_request;
                u64 xferred = 0;

                for_each_obj_request(img_request, obj_request)
                        xferred += obj_request->xferred;
                img_request->xferred = xferred;
        }

        if (img_request->callback)
                img_request->callback(img_request);
        else
                rbd_img_request_put(img_request);
}
/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);

        return wait_for_completion_interruptible(&obj_request->completion);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_WRITE, &img_request->flags);
        smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_CHILD, &img_request->flags);
        smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
        clear_bit(IMG_REQ_CHILD, &img_request->flags);
        smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_LAYERED, &img_request->flags);
        smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
        clear_bit(IMG_REQ_LAYERED, &img_request->flags);
        smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
        u64 xferred = obj_request->xferred;
        u64 length = obj_request->length;

        dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
                obj_request, obj_request->img_request, obj_request->result,
                xferred, length);
        /*
         * ENOENT means a hole in the image.  We zero-fill the entire
         * length of the request.  A short read also implies zero-fill
         * to the end of the request.  An error requires the whole
         * length of the request to be reported finished with an error
         * to the block layer.  In each case we update the xferred
         * count to indicate the whole request was satisfied.
         */
        rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
        if (obj_request->result == -ENOENT) {
                if (obj_request->type == OBJ_REQUEST_BIO)
                        zero_bio_chain(obj_request->bio_list, 0);
                else
                        zero_pages(obj_request->pages, 0, length);
                obj_request->result = 0;
        } else if (xferred < length && !obj_request->result) {
                if (obj_request->type == OBJ_REQUEST_BIO)
                        zero_bio_chain(obj_request->bio_list, xferred);
                else
                        zero_pages(obj_request->pages, xferred, length);
        }
        obj_request->xferred = length;
        obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p cb %p\n", __func__, obj_request,
                obj_request->callback);
        if (obj_request->callback)
                obj_request->callback(obj_request);
        else
                complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);
        obj_request_done_set(obj_request);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = NULL;
        struct rbd_device *rbd_dev = NULL;
        bool layered = false;

        if (obj_request_img_data_test(obj_request)) {
                img_request = obj_request->img_request;
                layered = img_request && img_request_layered_test(img_request);
                rbd_dev = img_request->rbd_dev;
        }

        dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
                obj_request, img_request, obj_request->result,
                obj_request->xferred, obj_request->length);
        if (layered && obj_request->result == -ENOENT &&
                        obj_request->img_offset < rbd_dev->parent_overlap)
                rbd_img_parent_read(obj_request);
        else if (img_request)
                rbd_img_obj_request_read_callback(obj_request);
        else
                obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p result %d %llu\n", __func__, obj_request,
                obj_request->result, obj_request->length);
        /*
         * There is no such thing as a successful short write.  Set
         * it to our originally-requested length.
         */
        obj_request->xferred = obj_request->length;
        obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);
        obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
                                struct ceph_msg *msg)
{
        struct rbd_obj_request *obj_request = osd_req->r_priv;
        u16 opcode;

        dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
        rbd_assert(osd_req == obj_request->osd_req);
        if (obj_request_img_data_test(obj_request)) {
                rbd_assert(obj_request->img_request);
                rbd_assert(obj_request->which != BAD_WHICH);
        } else {
                rbd_assert(obj_request->which == BAD_WHICH);
        }

        if (osd_req->r_result < 0)
                obj_request->result = osd_req->r_result;

        BUG_ON(osd_req->r_num_ops > 2);

        /*
         * We support a 64-bit length, but ultimately it has to be
         * passed to blk_end_request(), which takes an unsigned int.
         */
        obj_request->xferred = osd_req->r_reply_op_len[0];
        rbd_assert(obj_request->xferred < (u64)UINT_MAX);
        opcode = osd_req->r_ops[0].op;
        switch (opcode) {
        case CEPH_OSD_OP_READ:
                rbd_osd_read_callback(obj_request);
                break;
        case CEPH_OSD_OP_WRITE:
                rbd_osd_write_callback(obj_request);
                break;
        case CEPH_OSD_OP_STAT:
                rbd_osd_stat_callback(obj_request);
                break;
        case CEPH_OSD_OP_CALL:
        case CEPH_OSD_OP_NOTIFY_ACK:
        case CEPH_OSD_OP_WATCH:
                rbd_osd_trivial_callback(obj_request);
                break;
        default:
                rbd_warn(NULL, "%s: unsupported op %hu\n",
                        obj_request->object_name, (unsigned short) opcode);
                break;
        }

        if (obj_request_done_test(obj_request))
                rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = obj_request->img_request;
        struct ceph_osd_request *osd_req = obj_request->osd_req;
        u64 snap_id;

        rbd_assert(osd_req != NULL);

        snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
        ceph_osdc_build_request(osd_req, obj_request->offset,
                        NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = obj_request->img_request;
        struct ceph_osd_request *osd_req = obj_request->osd_req;
        struct ceph_snap_context *snapc;
        struct timespec mtime = CURRENT_TIME;

        rbd_assert(osd_req != NULL);

        snapc = img_request ? img_request->snapc : NULL;
        ceph_osdc_build_request(osd_req, obj_request->offset,
                        snapc, CEPH_NOSNAP, &mtime);
}
static struct ceph_osd_request *rbd_osd_req_create(
                                        struct rbd_device *rbd_dev,
                                        bool write_request,
                                        struct rbd_obj_request *obj_request)
{
        struct ceph_snap_context *snapc = NULL;
        struct ceph_osd_client *osdc;
        struct ceph_osd_request *osd_req;

        if (obj_request_img_data_test(obj_request)) {
                struct rbd_img_request *img_request = obj_request->img_request;

                rbd_assert(write_request ==
                                img_request_write_test(img_request));
                if (write_request)
                        snapc = img_request->snapc;
        }

        /* Allocate and initialize the request, for the single op */

        osdc = &rbd_dev->rbd_client->client->osdc;
        osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
        if (!osd_req)
                return NULL;    /* ENOMEM */

        if (write_request)
                osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
        else
                osd_req->r_flags = CEPH_OSD_FLAG_READ;

        osd_req->r_callback = rbd_osd_req_callback;
        osd_req->r_priv = obj_request;

        osd_req->r_oid_len = strlen(obj_request->object_name);
        rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
        memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

        osd_req->r_file_layout = rbd_dev->layout;       /* struct */

        return osd_req;
}
/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has two osd ops,
 * a copyup method call, and a "normal" write request.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        struct ceph_snap_context *snapc;
        struct rbd_device *rbd_dev;
        struct ceph_osd_client *osdc;
        struct ceph_osd_request *osd_req;

        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;
        rbd_assert(img_request);
        rbd_assert(img_request_write_test(img_request));

        /* Allocate and initialize the request, for the two ops */

        snapc = img_request->snapc;
        rbd_dev = img_request->rbd_dev;
        osdc = &rbd_dev->rbd_client->client->osdc;
        osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
        if (!osd_req)
                return NULL;    /* ENOMEM */

        osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
        osd_req->r_callback = rbd_osd_req_callback;
        osd_req->r_priv = obj_request;

        osd_req->r_oid_len = strlen(obj_request->object_name);
        rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
        memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

        osd_req->r_file_layout = rbd_dev->layout;       /* struct */

        return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
        ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
                                                u64 offset, u64 length,
                                                enum obj_request_type type)
{
        struct rbd_obj_request *obj_request;
        size_t size;
        char *name;

        rbd_assert(obj_request_type_valid(type));

        size = strlen(object_name) + 1;
        name = kmalloc(size, GFP_NOIO);
        if (!name)
                return NULL;

        obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
        if (!obj_request) {
                kfree(name);
                return NULL;
        }

        obj_request->object_name = memcpy(name, object_name, size);
        obj_request->offset = offset;
        obj_request->length = length;
        obj_request->flags = 0;
        obj_request->which = BAD_WHICH;
        obj_request->type = type;
        INIT_LIST_HEAD(&obj_request->links);
        init_completion(&obj_request->completion);
        kref_init(&obj_request->kref);

        dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
                offset, length, (int)type, obj_request);

        return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
        struct rbd_obj_request *obj_request;

        obj_request = container_of(kref, struct rbd_obj_request, kref);

        dout("%s: obj %p\n", __func__, obj_request);

        rbd_assert(obj_request->img_request == NULL);
        rbd_assert(obj_request->which == BAD_WHICH);

        if (obj_request->osd_req)
                rbd_osd_req_destroy(obj_request->osd_req);

        rbd_assert(obj_request_type_valid(obj_request->type));
        switch (obj_request->type) {
        case OBJ_REQUEST_NODATA:
                break;          /* Nothing to do */
        case OBJ_REQUEST_BIO:
                if (obj_request->bio_list)
                        bio_chain_put(obj_request->bio_list);
                break;
        case OBJ_REQUEST_PAGES:
                if (obj_request->pages)
                        ceph_release_page_vector(obj_request->pages,
                                                obj_request->page_count);
                break;
        }

        kfree(obj_request->object_name);
        obj_request->object_name = NULL;
        kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
        rbd_dev_remove_parent(rbd_dev);
        rbd_spec_put(rbd_dev->parent_spec);
        rbd_dev->parent_spec = NULL;
        rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
        int counter;

        if (!rbd_dev->parent_spec)
                return;

        counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
        if (counter > 0)
                return;

        /* Last reference; clean up parent data structures */

        if (!counter)
                rbd_dev_unparent(rbd_dev);
        else
                rbd_warn(rbd_dev, "parent reference underflow\n");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * We must get the reference before checking for the overlap to
 * coordinate properly with zeroing the parent overlap in
 * rbd_dev_v2_parent_info() when an image gets flattened.  We
 * drop it again if there is no overlap.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
        int counter;

        if (!rbd_dev->parent_spec)
                return false;

        counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
        if (counter > 0 && rbd_dev->parent_overlap)
                return true;

        /* Image was flattened, but parent is not yet torn down */

        if (counter < 0)
                rbd_warn(rbd_dev, "parent reference overflow\n");

        return false;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
                                        struct rbd_device *rbd_dev,
                                        u64 offset, u64 length,
                                        bool write_request)
{
        struct rbd_img_request *img_request;

        img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
        if (!img_request)
                return NULL;

        if (write_request) {
                down_read(&rbd_dev->header_rwsem);
                ceph_get_snap_context(rbd_dev->header.snapc);
                up_read(&rbd_dev->header_rwsem);
        }

        img_request->rq = NULL;
        img_request->rbd_dev = rbd_dev;
        img_request->offset = offset;
        img_request->length = length;
        img_request->flags = 0;
        if (write_request) {
                img_request_write_set(img_request);
                img_request->snapc = rbd_dev->header.snapc;
        } else {
                img_request->snap_id = rbd_dev->spec->snap_id;
        }
        if (rbd_dev_parent_get(rbd_dev))
                img_request_layered_set(img_request);
        spin_lock_init(&img_request->completion_lock);
        img_request->next_completion = 0;
        img_request->callback = NULL;
        img_request->result = 0;
        img_request->obj_request_count = 0;
        INIT_LIST_HEAD(&img_request->obj_requests);
        kref_init(&img_request->kref);

        dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
                write_request ? "write" : "read", offset, length,
                img_request);

        return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
        struct rbd_img_request *img_request;
        struct rbd_obj_request *obj_request;
        struct rbd_obj_request *next_obj_request;

        img_request = container_of(kref, struct rbd_img_request, kref);

        dout("%s: img %p\n", __func__, img_request);

        for_each_obj_request_safe(img_request, obj_request, next_obj_request)
                rbd_img_obj_request_del(img_request, obj_request);
        rbd_assert(img_request->obj_request_count == 0);

        if (img_request_layered_test(img_request)) {
                img_request_layered_clear(img_request);
                rbd_dev_parent_put(img_request->rbd_dev);
        }

        if (img_request_write_test(img_request))
                ceph_put_snap_context(img_request->snapc);

        kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
                                        struct rbd_obj_request *obj_request,
                                        u64 img_offset, u64 length)
{
        struct rbd_img_request *parent_request;
        struct rbd_device *rbd_dev;

        rbd_assert(obj_request->img_request);
        rbd_dev = obj_request->img_request->rbd_dev;

        parent_request = rbd_img_request_create(rbd_dev->parent,
                                                img_offset, length, false);
        if (!parent_request)
                return NULL;

        img_request_child_set(parent_request);
        rbd_obj_request_get(obj_request);
        parent_request->obj_request = obj_request;

        return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
        struct rbd_img_request *parent_request;
        struct rbd_obj_request *orig_request;

        parent_request = container_of(kref, struct rbd_img_request, kref);
        orig_request = parent_request->obj_request;

        parent_request->obj_request = NULL;
        rbd_obj_request_put(orig_request);
        img_request_child_clear(parent_request);

        rbd_img_request_destroy(kref);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        unsigned int xferred;
        int result;
        bool more;

        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;

        rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
        xferred = (unsigned int)obj_request->xferred;
        result = obj_request->result;
        if (result) {
                struct rbd_device *rbd_dev = img_request->rbd_dev;

                rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
                        img_request_write_test(img_request) ? "write" : "read",
                        obj_request->length, obj_request->img_offset,
                        obj_request->offset);
                rbd_warn(rbd_dev, "  result %d xferred %x\n",
                        result, xferred);
                if (!img_request->result)
                        img_request->result = result;
                /*
                 * Need to end I/O on the entire obj_request worth of
                 * bytes in case of error.
                 */
                xferred = obj_request->length;
        }

        /* Image object requests don't own their page array */

        if (obj_request->type == OBJ_REQUEST_PAGES) {
                obj_request->pages = NULL;
                obj_request->page_count = 0;
        }

        if (img_request_child_test(img_request)) {
                rbd_assert(img_request->obj_request != NULL);
                more = obj_request->which < img_request->obj_request_count - 1;
        } else {
                rbd_assert(img_request->rq != NULL);
                more = blk_end_request(img_request->rq, result, xferred);
        }

        return more;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        u32 which = obj_request->which;
        bool more = true;

        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;

        dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
        rbd_assert(img_request != NULL);
        rbd_assert(img_request->obj_request_count > 0);
        rbd_assert(which != BAD_WHICH);
        rbd_assert(which < img_request->obj_request_count);

        spin_lock_irq(&img_request->completion_lock);
        if (which != img_request->next_completion)
                goto out;

        for_each_obj_request_from(img_request, obj_request) {
                rbd_assert(more);
                rbd_assert(which < img_request->obj_request_count);

                if (!obj_request_done_test(obj_request))
                        break;
                more = rbd_img_obj_end_request(obj_request);
                which++;
        }

        rbd_assert(more ^ (which == img_request->obj_request_count));
        img_request->next_completion = which;
out:
        spin_unlock_irq(&img_request->completion_lock);
        rbd_img_request_put(img_request);

        if (!more)
                rbd_img_request_complete(img_request);
}
/*
 * Split up an image request into one or more object requests, each
 * to a different object.  The "type" parameter indicates whether
 * "data_desc" is the pointer to the head of a list of bio
 * structures, or the base of a page array.  In either case this
 * function assumes data_desc describes memory sufficient to hold
 * all data described by the image request.
 */
static int rbd_img_request_fill(struct rbd_img_request *img_request,
					enum obj_request_type type,
					void *data_desc)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	bool write_request = img_request_write_test(img_request);
	struct bio *bio_list = NULL;
	unsigned int bio_offset = 0;
	struct page **pages = NULL;
	u64 img_offset;
	u64 resid;
	u16 opcode;

	dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
		(int)type, data_desc);

	opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
	img_offset = img_request->offset;
	resid = img_request->length;
	rbd_assert(resid > 0);

	if (type == OBJ_REQUEST_BIO) {
		bio_list = data_desc;
		rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
	} else {
		rbd_assert(type == OBJ_REQUEST_PAGES);
		pages = data_desc;
	}

	while (resid) {
		struct ceph_osd_request *osd_req;
		const char *object_name;
		u64 offset;
		u64 length;

		object_name = rbd_segment_name(rbd_dev, img_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, img_offset);
		length = rbd_segment_length(rbd_dev, img_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length, type);
		/* object request has its own copy of the object name */
		rbd_segment_name_free(object_name);
		if (!obj_request)
			goto out_unwind;

		/*
		 * set obj_request->img_request before creating the
		 * osd_request so that it gets the right snapc
		 */
		rbd_img_obj_request_add(img_request, obj_request);

		if (type == OBJ_REQUEST_BIO) {
			unsigned int clone_size;

			rbd_assert(length <= (u64)UINT_MAX);
			clone_size = (unsigned int)length;
			obj_request->bio_list =
					bio_chain_clone_range(&bio_list,
								&bio_offset,
								clone_size,
								GFP_ATOMIC);
			if (!obj_request->bio_list)
				goto out_partial;
		} else {
			unsigned int page_count;

			obj_request->pages = pages;
			page_count = (u32)calc_pages_for(offset, length);
			obj_request->page_count = page_count;
			if ((offset + length) & ~PAGE_MASK)
				page_count--;	/* more on last page */
			pages += page_count;
		}

		osd_req = rbd_osd_req_create(rbd_dev, write_request,
						obj_request);
		if (!osd_req)
			goto out_partial;
		obj_request->osd_req = osd_req;
		obj_request->callback = rbd_img_obj_callback;
		rbd_img_request_get(img_request);

		osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
						0, 0);
		if (type == OBJ_REQUEST_BIO)
			osd_req_op_extent_osd_data_bio(osd_req, 0,
					obj_request->bio_list, length);
		else
			osd_req_op_extent_osd_data_pages(osd_req, 0,
					obj_request->pages, length,
					offset & ~PAGE_MASK, false, false);

		if (write_request)
			rbd_osd_req_format_write(obj_request);
		else
			rbd_osd_req_format_read(obj_request);

		obj_request->img_offset = img_offset;

		img_offset += length;
		resid -= length;
	}

	return 0;

out_partial:
	rbd_obj_request_put(obj_request);
out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	return -ENOMEM;
}
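/*
 * Worked example (illustrative, assuming the default object order of
 * 22, i.e. 4 MiB objects): an 8 KiB image request at image offset
 * 0x3ff000 straddles an object boundary, so the loop above emits two
 * object requests: 4 KiB at offset 0x3ff000 within the first object,
 * then 4 KiB at offset 0 within the next one, with resid reaching 0
 * after the second pass.
 */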
static void
rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;
	struct page **pages;
	u32 page_count;

	rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);

	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev);

	pages = obj_request->copyup_pages;
	rbd_assert(pages != NULL);
	obj_request->copyup_pages = NULL;
	page_count = obj_request->copyup_page_count;
	rbd_assert(page_count);
	obj_request->copyup_page_count = 0;
	ceph_release_page_vector(pages, page_count);

	/*
	 * We want the transfer count to reflect the size of the
	 * original write request.  There is no such thing as a
	 * successful short write, so if the request was successful
	 * we can just set it to the originally-requested length.
	 */
	if (!obj_request->result)
		obj_request->xferred = obj_request->length;

	/* Finish up with the normal image object callback */

	rbd_img_obj_callback(obj_request);
}
static void
rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *orig_request;
	struct ceph_osd_request *osd_req;
	struct ceph_osd_client *osdc;
	struct rbd_device *rbd_dev;
	struct page **pages;
	u32 page_count;
	int img_result;
	u64 parent_length;
	u64 offset;
	u64 length;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request */

	pages = img_request->copyup_pages;
	rbd_assert(pages != NULL);
	img_request->copyup_pages = NULL;
	page_count = img_request->copyup_page_count;
	rbd_assert(page_count);
	img_request->copyup_page_count = 0;

	orig_request = img_request->obj_request;
	rbd_assert(orig_request != NULL);
	rbd_assert(obj_request_type_valid(orig_request->type));
	img_result = img_request->result;
	parent_length = img_request->length;
	rbd_assert(parent_length == img_request->xferred);
	rbd_img_request_put(img_request);

	rbd_assert(orig_request->img_request);
	rbd_dev = orig_request->img_request->rbd_dev;
	rbd_assert(rbd_dev);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		ceph_release_page_vector(pages, page_count);
		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, orig_request);
		if (!img_result)
			return;
	}

	if (img_result)
		goto out_err;

	/*
	 * The original osd request is of no use to us any more.
	 * We need a new one that can hold the two ops in a copyup
	 * request.  Allocate the new copyup osd request for the
	 * original request, and release the old one.
	 */
	img_result = -ENOMEM;
	osd_req = rbd_osd_req_create_copyup(orig_request);
	if (!osd_req)
		goto out_err;
	rbd_osd_req_destroy(orig_request->osd_req);
	orig_request->osd_req = osd_req;
	orig_request->copyup_pages = pages;
	orig_request->copyup_page_count = page_count;

	/* Initialize the copyup op */

	osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
	osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
						false, false);

	/* Then the original write request op */

	offset = orig_request->offset;
	length = orig_request->length;
	osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
					offset, length, 0, 0);
	if (orig_request->type == OBJ_REQUEST_BIO)
		osd_req_op_extent_osd_data_bio(osd_req, 1,
					orig_request->bio_list, length);
	else
		osd_req_op_extent_osd_data_pages(osd_req, 1,
					orig_request->pages, length,
					offset & ~PAGE_MASK, false, false);

	rbd_osd_req_format_write(orig_request);

	/* All set, send it off. */

	orig_request->callback = rbd_img_obj_copyup_callback;
	osdc = &rbd_dev->rbd_client->client->osdc;
	img_result = rbd_obj_request_submit(osdc, orig_request);
	if (!img_result)
		return;
out_err:
	/* Record the error code and complete the request */

	orig_request->result = img_result;
	orig_request->xferred = 0;
	obj_request_done_set(orig_request);
	rbd_obj_request_complete(orig_request);
}
/*
 * Read from the parent image the range of data that covers the
 * entire target of the given object request.  This is used for
 * satisfying a layered image write request when the target of an
 * object request from the image request does not exist.
 *
 * A page array big enough to hold the returned data is allocated
 * and supplied to rbd_img_request_fill() as the "data descriptor."
 * When the read completes, this page array will be transferred to
 * the original object request for the copyup operation.
 *
 * If an error occurs, record it as the result of the original
 * object request and mark it done so it gets completed.
 */
static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_img_request *parent_request = NULL;
	struct rbd_device *rbd_dev;
	u64 img_offset;
	u64 length;
	struct page **pages = NULL;
	u32 page_count;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request_type_valid(obj_request->type));

	img_request = obj_request->img_request;
	rbd_assert(img_request != NULL);
	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev->parent != NULL);

	/*
	 * Determine the byte range covered by the object in the
	 * child image to which the original request was to be sent.
	 */
	img_offset = obj_request->img_offset - obj_request->offset;
	length = (u64)1 << rbd_dev->header.obj_order;

	/*
	 * There is no defined parent data beyond the parent
	 * overlap, so limit what we read at that boundary if
	 * necessary.
	 */
	if (img_offset + length > rbd_dev->parent_overlap) {
		rbd_assert(img_offset < rbd_dev->parent_overlap);
		length = rbd_dev->parent_overlap - img_offset;
	}

	/*
	 * Allocate a page array big enough to receive the data read
	 * from the parent.
	 */
	page_count = (u32)calc_pages_for(0, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages)) {
		result = PTR_ERR(pages);
		pages = NULL;
		goto out_err;
	}

	result = -ENOMEM;
	parent_request = rbd_parent_request_create(obj_request,
						img_offset, length);
	if (!parent_request)
		goto out_err;

	result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
	if (result)
		goto out_err;
	parent_request->copyup_pages = pages;
	parent_request->copyup_page_count = page_count;

	parent_request->callback = rbd_img_obj_parent_read_full_callback;
	result = rbd_img_request_submit(parent_request);
	if (!result)
		return 0;

	parent_request->copyup_pages = NULL;
	parent_request->copyup_page_count = 0;
	parent_request->obj_request = NULL;
	rbd_obj_request_put(obj_request);
out_err:
	if (pages)
		ceph_release_page_vector(pages, page_count);
	if (parent_request)
		rbd_img_request_put(parent_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);

	return result;
}
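/*
 * Copyup path summary (descriptive, restating the two functions
 * above): a write to a clone object that does not yet exist first
 * reads the covering range from the parent via the parent image
 * request built here; its completion callback then issues a single
 * OSD request carrying two ops -- a "copyup" class method call with
 * the parent data, followed by the original write -- so the object
 * is populated and modified in one round trip.
 */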
static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *orig_request;
	struct rbd_device *rbd_dev;
	int result;

	rbd_assert(!obj_request_img_data_test(obj_request));

	/*
	 * All we need from the object request is the original
	 * request and the result of the STAT op.  Grab those, then
	 * we're done with the request.
	 */
	orig_request = obj_request->obj_request;
	obj_request->obj_request = NULL;
	rbd_assert(orig_request);
	rbd_assert(orig_request->img_request);

	result = obj_request->result;
	obj_request->result = 0;

	dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
		obj_request, orig_request, result,
		obj_request->xferred, obj_request->length);
	rbd_obj_request_put(obj_request);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	rbd_dev = orig_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		rbd_obj_request_put(orig_request);
		osdc = &rbd_dev->rbd_client->client->osdc;
		result = rbd_obj_request_submit(osdc, orig_request);
		if (!result)
			return;
	}

	/*
	 * Our only purpose here is to determine whether the object
	 * exists, and we don't want to treat the non-existence as
	 * an error.  If something else comes back, transfer the
	 * error to the original request and complete it now.
	 */
	if (!result) {
		obj_request_existence_set(orig_request, true);
	} else if (result == -ENOENT) {
		obj_request_existence_set(orig_request, false);
	} else if (result) {
		orig_request->result = result;
		goto out;
	}

	/*
	 * Resubmit the original request now that we have recorded
	 * whether the target object exists.
	 */
	orig_request->result = rbd_img_obj_request_submit(orig_request);
out:
	if (orig_request->result)
		rbd_obj_request_complete(orig_request);
	rbd_obj_request_put(orig_request);
}
static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *stat_request;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
	page_count = (u32)calc_pages_for(0, size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
							OBJ_REQUEST_PAGES);
	if (!stat_request)
		goto out;

	rbd_obj_request_get(obj_request);
	stat_request->obj_request = obj_request;
	stat_request->pages = pages;
	stat_request->page_count = page_count;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;
	stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
						stat_request);
	if (!stat_request->osd_req)
		goto out;
	stat_request->callback = rbd_img_obj_exists_callback;

	osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
	osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
					false, false);
	rbd_osd_req_format_read(stat_request);

	osdc = &rbd_dev->rbd_client->client->osdc;
	ret = rbd_obj_request_submit(osdc, stat_request);
out:
	if (ret)
		rbd_obj_request_put(obj_request);

	return ret;
}
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;
	bool known;

	rbd_assert(obj_request_img_data_test(obj_request));

	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_dev = img_request->rbd_dev;

	/*
	 * Only writes to layered images need special handling.
	 * Reads and non-layered writes are simple object requests.
	 * Layered writes that start beyond the end of the overlap
	 * with the parent have no parent data, so they too are
	 * simple object requests.  Finally, if the target object is
	 * known to already exist, its parent data has already been
	 * copied, so a write to the object can also be handled as a
	 * simple object request.
	 */
	if (!img_request_write_test(img_request) ||
		!img_request_layered_test(img_request) ||
		!obj_request_overlaps_parent(obj_request) ||
		((known = obj_request_known_test(obj_request)) &&
			obj_request_exists_test(obj_request))) {

		struct rbd_device *rbd_dev;
		struct ceph_osd_client *osdc;

		rbd_dev = obj_request->img_request->rbd_dev;
		osdc = &rbd_dev->rbd_client->client->osdc;

		return rbd_obj_request_submit(osdc, obj_request);
	}

	/*
	 * It's a layered write.  The target object might exist but
	 * we may not know that yet.  If we know it doesn't exist,
	 * start by reading the data for the full target object from
	 * the parent so we can use it for a copyup to the target.
	 */
	if (known)
		return rbd_img_obj_parent_read_full(obj_request);

	/* We don't know whether the target exists.  Go find out. */

	return rbd_img_obj_exists_submit(obj_request);
}
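/*
 * Dispatch summary (descriptive, restating the tests above):
 *   read, or write to a non-layered image ....... plain object request
 *   layered write beyond the parent overlap ..... plain object request
 *   layered write, target known to exist ........ plain object request
 *   layered write, target known absent .......... parent read + copyup
 *   layered write, existence unknown ............ STAT, then resubmit
 */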
static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	dout("%s: img %p\n", __func__, img_request);
	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
		int ret;

		ret = rbd_img_obj_request_submit(obj_request);
		if (ret)
			return ret;
	}

	return 0;
}
static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_device *rbd_dev;
	u64 obj_end;
	u64 img_xferred;
	int img_result;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request and release it */

	obj_request = img_request->obj_request;
	img_xferred = img_request->xferred;
	img_result = img_request->result;
	rbd_img_request_put(img_request);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to re-submit the
	 * original request.
	 */
	rbd_assert(obj_request);
	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, obj_request);
		if (!img_result)
			return;
	}

	obj_request->result = img_result;
	if (obj_request->result)
		goto out;

	/*
	 * We need to zero anything beyond the parent overlap
	 * boundary.  Since rbd_img_obj_request_read_callback()
	 * will zero anything beyond the end of a short read, an
	 * easy way to do this is to pretend the data from the
	 * parent came up short--ending at the overlap boundary.
	 */
	rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
	obj_end = obj_request->img_offset + obj_request->length;
	if (obj_end > rbd_dev->parent_overlap) {
		u64 xferred = 0;

		if (obj_request->img_offset < rbd_dev->parent_overlap)
			xferred = rbd_dev->parent_overlap -
					obj_request->img_offset;

		obj_request->xferred = min(img_xferred, xferred);
	} else {
		obj_request->xferred = img_xferred;
	}
out:
	rbd_img_obj_request_read_callback(obj_request);
	rbd_obj_request_complete(obj_request);
}
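/*
 * Example of the short-read trick (illustrative): with parent_overlap
 * at 0x100000, a read of 0x2000 bytes at img_offset 0xff000 may take
 * only 0x1000 bytes from the parent, so xferred is clamped to 0x1000
 * and rbd_img_obj_request_read_callback() zeroes the remaining 0x1000
 * bytes that lie beyond the overlap boundary.
 */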
static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request != NULL);
	rbd_assert(obj_request->result == (s32) -ENOENT);
	rbd_assert(obj_request_type_valid(obj_request->type));

	/* rbd_read_finish(obj_request, obj_request->length); */
	img_request = rbd_parent_request_create(obj_request,
						obj_request->img_offset,
						obj_request->length);
	result = -ENOMEM;
	if (!img_request)
		goto out_err;

	if (obj_request->type == OBJ_REQUEST_BIO)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
						obj_request->bio_list);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
						obj_request->pages);
	if (result)
		goto out_err;

	img_request->callback = rbd_img_parent_read_callback;
	result = rbd_img_request_submit(img_request);
	if (result)
		goto out_err;

	return;
out_err:
	if (img_request)
		rbd_img_request_put(img_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);
}
static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
							OBJ_REQUEST_NODATA);
	if (!obj_request)
		return -ENOMEM;

	ret = -ENOMEM;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
					notify_id, 0, 0);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
out:
	rbd_obj_request_put(obj_request);

	return ret;
}
static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
	struct rbd_device *rbd_dev = (struct rbd_device *)data;
	int ret;

	if (!rbd_dev)
		return;

	dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
		rbd_dev->header_name, (unsigned long long)notify_id,
		(unsigned int)opcode);
	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);

	rbd_obj_notify_ack_sync(rbd_dev, notify_id);
}
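/*
 * Flow note (descriptive): a notification on the header object means
 * some client changed the image (resize, snapshot, flatten).  The
 * callback re-reads the header via rbd_dev_refresh() and then must
 * acknowledge the notification so the notifier is not left waiting
 * for its notify timeout.
 */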
/*
 * Request sync osd watch/unwatch.  The value of "start" determines
 * whether a watch request is being initiated or torn down.
 */
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	int ret;

	rbd_assert(start ^ !!rbd_dev->watch_event);
	rbd_assert(start ^ !!rbd_dev->watch_request);

	if (start) {
		ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
						&rbd_dev->watch_event);
		if (ret < 0)
			return ret;
		rbd_assert(rbd_dev->watch_event != NULL);
	}

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
							OBJ_REQUEST_NODATA);
	if (!obj_request)
		goto out_cancel;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
	if (!obj_request->osd_req)
		goto out_cancel;

	if (start)
		ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
	else
		ceph_osdc_unregister_linger_request(osdc,
					rbd_dev->watch_request->osd_req);

	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
				rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
	rbd_osd_req_format_write(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out_cancel;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out_cancel;
	ret = obj_request->result;
	if (ret)
		goto out_cancel;

	/*
	 * A watch request is set to linger, so the underlying osd
	 * request won't go away until we unregister it.  We retain
	 * a pointer to the object request during that time (in
	 * rbd_dev->watch_request), so we'll keep a reference to
	 * it.  We'll drop that reference (below) after we've
	 * unregistered it.
	 */
	if (start) {
		rbd_dev->watch_request = obj_request;

		return 0;
	}

	/* We have successfully torn down the watch request */

	rbd_obj_request_put(rbd_dev->watch_request);
	rbd_dev->watch_request = NULL;
out_cancel:
	/* Cancel the event if we're tearing down, or on error */
	ceph_osdc_cancel_event(rbd_dev->watch_event);
	rbd_dev->watch_event = NULL;
	if (obj_request)
		rbd_obj_request_put(obj_request);

	return ret;
}
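/*
 * Usage note (descriptive): this is called with start == true when an
 * image is mapped, establishing the lingering watch that feeds
 * rbd_watch_cb(), and with start == false at unmap time to tear the
 * watch down.  The asserts at the top enforce that the two calls
 * strictly alternate.
 */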
/*
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the inbound buffer, or a negative error code.
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			     const char *object_name,
			     const char *class_name,
			     const char *method_name,
			     const void *outbound,
			     size_t outbound_size,
			     void *inbound,
			     size_t inbound_size)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct page **pages;
	u32 page_count;
	int ret;

	/*
	 * Method calls are ultimately read operations.  The result
	 * should be placed into the inbound buffer provided.  They
	 * also supply outbound data--parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
	 */
	page_count = (u32)calc_pages_for(0, inbound_size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
					class_name, method_name);
	if (outbound_size) {
		struct ceph_pagelist *pagelist;

		pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
		if (!pagelist)
			goto out;

		ceph_pagelist_init(pagelist);
		ceph_pagelist_append(pagelist, outbound, outbound_size);
		osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
						pagelist);
	}
	osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
					obj_request->pages, inbound_size,
					0, false, false);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred < (u64)INT_MAX);
	ret = (int)obj_request->xferred;
	ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}
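/*
 * Call-pattern sketch (illustrative, mirroring the v2 helpers below):
 *
 *	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *	struct { u8 order; __le64 size; } __attribute__ ((packed)) buf;
 *	int ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *				      "rbd", "get_size",
 *				      &snapid, sizeof (snapid),
 *				      &buf, sizeof (buf));
 *
 * A non-negative return is the number of bytes the method wrote into
 * the inbound buffer; callers must check it covers the expected
 * structure before decoding.
 */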
static void rbd_request_fn(struct request_queue *q)
		__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct rbd_device *rbd_dev = q->queuedata;
	bool read_only = rbd_dev->mapping.read_only;
	struct request *rq;
	int result;

	while ((rq = blk_fetch_request(q))) {
		bool write_request = rq_data_dir(rq) == WRITE;
		struct rbd_img_request *img_request;
		u64 offset;
		u64 length;

		/* Ignore any non-FS requests that filter through. */

		if (rq->cmd_type != REQ_TYPE_FS) {
			dout("%s: non-fs request type %d\n", __func__,
				(int) rq->cmd_type);
			__blk_end_request_all(rq, 0);
			continue;
		}

		/* Ignore/skip any zero-length requests */

		offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
		length = (u64) blk_rq_bytes(rq);

		if (!length) {
			dout("%s: zero-length request\n", __func__);
			__blk_end_request_all(rq, 0);
			continue;
		}

		spin_unlock_irq(q->queue_lock);

		/* Disallow writes to a read-only device */

		if (write_request) {
			result = -EROFS;
			if (read_only)
				goto end_request;
			rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
		}

		/*
		 * Quit early if the mapped snapshot no longer
		 * exists.  It's still possible the snapshot will
		 * have disappeared by the time our request arrives
		 * at the osd, but there's no sense in sending it if
		 * we already know.
		 */
		if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
			dout("request for non-existent snapshot");
			rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
			result = -ENXIO;
			goto end_request;
		}

		result = -EINVAL;
		if (offset && length > U64_MAX - offset + 1) {
			rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
				offset, length);
			goto end_request;	/* Shouldn't happen */
		}

		result = -EIO;
		if (offset + length > rbd_dev->mapping.size) {
			rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
				offset, length, rbd_dev->mapping.size);
			goto end_request;
		}

		result = -ENOMEM;
		img_request = rbd_img_request_create(rbd_dev, offset, length,
							write_request);
		if (!img_request)
			goto end_request;

		img_request->rq = rq;

		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
						rq->bio);
		if (!result)
			result = rbd_img_request_submit(img_request);
		if (result)
			rbd_img_request_put(img_request);
end_request:
		spin_lock_irq(q->queue_lock);
		if (result < 0) {
			rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
				write_request ? "write" : "read",
				length, offset, result);

			__blk_end_request_all(rq, result);
		}
	}
}
/*
 * A queue callback.  Makes sure we don't create a bio that spans
 * multiple osd objects.  One exception is single-page bios, which
 * are handled later in bio_chain_clone_range().
 */
static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
			  struct bio_vec *bvec)
{
	struct rbd_device *rbd_dev = q->queuedata;
	sector_t sector_offset;
	sector_t sectors_per_obj;
	sector_t obj_sector_offset;
	int ret;

	/*
	 * Find how far into its rbd object the partition-relative
	 * bio start sector is, as an offset relative to the
	 * enclosing device object.
	 */
	sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
	sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
	obj_sector_offset = sector_offset & (sectors_per_obj - 1);

	/*
	 * Compute the number of bytes from that offset to the end
	 * of the object.  Account for what's already used by the bio.
	 */
	ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
	if (ret > bmd->bi_size)
		ret -= bmd->bi_size;
	else
		ret = 0;

	/*
	 * Don't send back more than was asked for.  And if the bio
	 * was empty, let the whole thing through because:  "Note
	 * that a block device *must* allow a single page to be
	 * added to an empty bio."
	 */
	rbd_assert(bvec->bv_len <= PAGE_SIZE);
	if (ret > (int) bvec->bv_len || !bmd->bi_size)
		ret = (int) bvec->bv_len;

	return ret;
}
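/*
 * Worked example (illustrative, assuming obj_order 22 and 512-byte
 * sectors): sectors_per_obj is 8192.  For a bio starting at absolute
 * sector 8190 that already holds 512 bytes, the space left in the
 * object is (8192 - 8190) * 512 = 1024 bytes, so at most 512 more
 * bytes may be added, and the bvec is trimmed accordingly.
 */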
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk = rbd_dev->disk;

	if (!disk)
		return;

	rbd_dev->disk = NULL;
	if (disk->flags & GENHD_FL_UP) {
		del_gendisk(disk);
		if (disk->queue)
			blk_cleanup_queue(disk->queue);
	}
	put_disk(disk);
}
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
				const char *object_name,
				u64 offset, u64 length, void *buf)

{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	page_count = (u32) calc_pages_for(offset, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, offset, length,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
					offset, length, 0, 0);
	osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
					obj_request->pages,
					obj_request->length,
					obj_request->offset & ~PAGE_MASK,
					false, false);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
	size = (size_t) obj_request->xferred;
	ceph_copy_from_page_vector(pages, buf, 0, size);
	rbd_assert(size <= (size_t)INT_MAX);
	ret = (int)size;
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}
/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return -ENOMEM;

		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
				       0, size, ondisk);
		if (ret < 0)
			goto out;
		if ((size_t)ret < size) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				size, ret);
			goto out;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
	kfree(ondisk);

	return ret;
}
/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;

	snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;

	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}
static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
	sector_t size;
	bool removing;

	/*
	 * Don't hold the lock while doing disk operations,
	 * or lock ordering will conflict with the bdev mutex via:
	 * rbd_add() -> blkdev_get() -> rbd_open()
	 */
	spin_lock_irq(&rbd_dev->lock);
	removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	/*
	 * If the device is being removed, rbd_dev->disk has
	 * been destroyed, so don't try to update its size
	 */
	if (!removing) {
		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
		dout("setting size to %llu sectors", (unsigned long long)size);
		set_capacity(rbd_dev->disk, size);
		revalidate_disk(rbd_dev->disk);
	}
}
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
	u64 mapping_size;
	int ret;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	mapping_size = rbd_dev->mapping.size;
	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_header_info(rbd_dev);
	else
		ret = rbd_dev_v2_header_info(rbd_dev);

	/* If it's a mapped snapshot, validate its EXISTS flag */

	rbd_exists_validate(rbd_dev);
	mutex_unlock(&ctl_mutex);
	if (mapping_size != rbd_dev->mapping.size)
		rbd_dev_update_size(rbd_dev);

	return ret;
}
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	u64 segment_size;

	/* create gendisk info */
	disk = alloc_disk(RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
	disk->major = rbd_dev->major;
	disk->first_minor = 0;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
	if (!q)
		goto out_disk;

	/* We use the default size, but let's be explicit about it. */
	blk_queue_physical_block_size(q, SECTOR_SIZE);

	/* set io sizes to object size */
	segment_size = rbd_obj_bytes(&rbd_dev->header);
	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
	blk_queue_max_segment_size(q, segment_size);
	blk_queue_io_min(q, segment_size);
	blk_queue_io_opt(q, segment_size);

	blk_queue_merge_bvec(q, rbd_merge_bvec);
	disk->queue = q;

	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
out_disk:
	put_disk(disk);

	return -ENOMEM;
}
static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}
static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		(unsigned long long)rbd_dev->mapping.size);
}

/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
			(unsigned long long)rbd_dev->mapping.features);
}

static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
			ceph_client_id(rbd_dev->rbd_client->client));
}

static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
			(unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
}

static ssize_t rbd_image_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}

/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}

/*
 * For an rbd v2 image, shows the pool id, image id, and snapshot id
 * for the parent image.  If there is no parent, simply shows
 * "(no parent image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	struct rbd_spec *spec = rbd_dev->parent_spec;
	int count;
	char *bufp = buf;

	if (!spec)
		return sprintf(buf, "(no parent image)\n");

	count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
			(unsigned long long) spec->pool_id, spec->pool_name);
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
			spec->image_name ? spec->image_name : "(unknown)");
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
			(unsigned long long) spec->snap_id, spec->snap_name);
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
	if (count < 0)
		return count;
	bufp += count;

	return (ssize_t) (bufp - buf);
}

static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);

	return ret < 0 ? ret : size;
}
static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);

static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_client_id.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_sysfs_dev_release(struct device *dev)
{
}

static struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_sysfs_dev_release,
};
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;
	kref_init(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
				struct rbd_spec *spec)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	rbd_dev->flags = 0;
	atomic_set(&rbd_dev->parent_ref, 0);
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->spec = spec;
	rbd_dev->rbd_client = rbdc;

	/* Initialize the layout used for all rbd requests */

	rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
	rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);

	return rbd_dev;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev);
}
/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_size",
				&snapid, sizeof (snapid),
				&size_buf, sizeof (size_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order) {
		*order = size_buf.order;
		dout("  order %u", (unsigned int)*order);
	}
	*snap_size = le64_to_cpu(size_buf.size);

	dout("  snap_id 0x%016llx snap_size = %llu\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_size);

	return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}
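/*
 * Wire-format note (descriptive): "get_size" replies with the packed
 * { u8 order; __le64 size; } decoded above.  For example, order 22
 * means 4 MiB objects, since object size is 1 << obj_order bytes.
 */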
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_object_prefix", NULL, 0,
				reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 incompat;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_features",
				&snapid, sizeof (snapid),
				&features_buf, sizeof (features_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	incompat = le64_to_cpu(features_buf.incompat);
	if (incompat & ~RBD_FEATURES_SUPPORTED)
		return -ENXIO;

	*snap_features = le64_to_cpu(features_buf.features);

	dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
	u64 pool_id;
	char *image_id;
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	snapid = cpu_to_le64(CEPH_NOSNAP);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_parent",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out_err;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, pool_id, out_err);
	if (pool_id == CEPH_NOPOOL) {
		/*
		 * Either the parent never existed, or we have
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 */
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			smp_mb();
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

		goto out;	/* No parent?  No problem. */
	}

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
			(unsigned long long)pool_id, U32_MAX);
		goto out_err;
	}
	parent_spec->pool_id = pool_id;

	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
	parent_spec->image_id = image_id;
	ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
	ceph_decode_64_safe(&p, end, overlap, out_err);

	if (overlap) {
		rbd_spec_put(rbd_dev->parent_spec);
		rbd_dev->parent_spec = parent_spec;
		parent_spec = NULL;	/* rbd_dev now owns this */
		rbd_dev->parent_overlap = overlap;
	} else {
		rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n");
	}
out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_stripe_unit_count", NULL, 0,
				(char *)&striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	/*
	 * We don't actually support the "fancy striping" feature
	 * (STRIPINGV2) yet, but if the striping sizes are the
	 * defaults the behavior is the same as before.  So find
	 * out, and only fail if the image has non-default values.
	 */
	obj_size = (u64)1 << rbd_dev->header.obj_order;
	p = &striping_info_buf;
	stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
				"(got %llu want %llu)",
				stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
				"(got %llu want 1)", stripe_count);
		return -EINVAL;
	}
	rbd_dev->header.stripe_unit = stripe_unit;
	rbd_dev->header.stripe_count = stripe_count;

	return 0;
}
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
				"rbd", "dir_get_name",
				image_id, image_id_size,
				reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}
static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}
static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 snap_id;
	bool found = false;
	u32 which;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			else
				break;
		}
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}
	return found ? snap_id : CEPH_NOSNAP;
}
/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}
/*
 * When an rbd image has a parent image, it is identified by the
 * pool, image, and snapshot ids (not names).  This function fills
 * in the names for those ids.  (It's OK if we can't figure out the
 * name for an image id, but the pool and snapshot ids should always
 * exist and have names.)  All names in an rbd spec are dynamically
 * allocated.
 *
 * When an image being mapped (not a parent) is probed, we have the
 * pool name and pool id, image name and image id, and the snapshot
 * name.  The only thing we're missing is the snapshot id.
 */
static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;

	/*
	 * An image being mapped will have the pool name (etc.), but
	 * we need to look up the snapshot id.
	 */
	if (spec->pool_name) {
		if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
			u64 snap_id;

			snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
			if (snap_id == CEPH_NOSNAP)
				return -ENOENT;
			spec->snap_id = snap_id;
		} else {
			spec->snap_id = CEPH_NOSNAP;
		}

		return 0;
	}

	/* Get the pool name; we have to make our own copy of this */

	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
		rbd_warn(rbd_dev, "unable to get image name");

	/* Look up the snapshot name, and make a copy */

	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;
out_err:
	kfree(image_name);
	kfree(pool_name);

	return ret;
}
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapcontext", NULL, 0,
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64))
		goto out;
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	ceph_put_snap_context(rbd_dev->header.snapc);
	rbd_dev->header.snapc = snapc;

	dout("  snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);

	return ret;
}
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf;
	__le64 snapid;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapshot_name",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout("  snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
	bool first_time = rbd_dev->header.object_prefix == NULL;
	int ret;

	down_write(&rbd_dev->header_rwsem);

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		goto out;

	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
			goto out;
	}

	/*
	 * If the image supports layering, get the parent info.  We
	 * need to probe the first time regardless.  Thereafter we
	 * only need to if there's a parent, to see if it has
	 * disappeared due to the mapped image getting flattened.
	 */
	if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
			(first_time || rbd_dev->parent_spec)) {
		bool warn;

		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out;

		/*
		 * Print a warning if this is the initial probe and
		 * the image has a parent.  Don't print it if the
		 * image now being probed is itself a parent.  We
		 * can tell at this point because we won't know its
		 * pool name yet (just its pool id).
		 */
		warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
		if (first_time && warn)
			rbd_warn(rbd_dev, "WARNING: kernel layering "
					"is EXPERIMENTAL!");
	}

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
		if (rbd_dev->mapping.size != rbd_dev->header.image_size)
			rbd_dev->mapping.size = rbd_dev->header.image_size;

	ret = rbd_dev_v2_snap_context(rbd_dev);
	dout("rbd_dev_v2_snap_context returned %d\n", ret);
out:
	up_write(&rbd_dev->header_rwsem);

	return ret;
}
static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
	struct device *dev;
	int ret;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	dev = &rbd_dev->dev;
	dev->bus = &rbd_bus_type;
	dev->type = &rbd_device_type;
	dev->parent = &rbd_root_dev;
	dev->release = rbd_dev_device_release;
	dev_set_name(dev, "%d", rbd_dev->dev_id);
	ret = device_register(dev);

	mutex_unlock(&ctl_mutex);

	return ret;
}

static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
{
	device_unregister(&rbd_dev->dev);
}
static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);

/*
 * Get a unique rbd identifier for the given new rbd_dev, and add
 * the rbd_dev to the global list.  The minimum rbd id is 1.
 */
static void rbd_dev_id_get(struct rbd_device *rbd_dev)
{
	rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);
	dout("rbd_dev %p given dev id %llu\n", rbd_dev,
		(unsigned long long) rbd_dev->dev_id);
}
/*
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
 */
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
	struct list_head *tmp;
	int rbd_id = rbd_dev->dev_id;
	int max_id;

	rbd_assert(rbd_id > 0);

	dout("rbd_dev %p released dev id %llu\n", rbd_dev,
		(unsigned long long) rbd_dev->dev_id);
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);

	/*
	 * If the id being "put" is not the current maximum, there
	 * is nothing special we need to do.
	 */
	if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
		spin_unlock(&rbd_dev_list_lock);
		return;
	}

	/*
	 * We need to update the current maximum id.  Search the
	 * list to find out what it is.  We're more likely to find
	 * the maximum at the end, so search the list backward.
	 */
	max_id = 0;
	list_for_each_prev(tmp, &rbd_dev_list) {
		struct rbd_device *rbd_dev;

		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id > max_id)
			max_id = rbd_dev->dev_id;
	}
	spin_unlock(&rbd_dev_list_lock);

	/*
	 * The max id could have been updated by rbd_dev_id_get(), in
	 * which case it now accurately reflects the new maximum.
	 * Be careful not to overwrite the maximum value in that
	 * case.
	 */
	atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
	dout("  max dev id has been reset\n");
}
/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}
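/*
 * Example (illustrative): given "  1.2.3.4:6789 rbd foo", next_token()
 * advances *buf past the two leading spaces to the '1' and returns 12,
 * the length of "1.2.3.4:6789".  *buf itself is not moved past the
 * token, which is why callers such as dup_token() add the returned
 * length themselves.
 */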
/*
 * Finds the next token in *buf, and if the provided token buffer is
 * big enough, copies the found token into it.  The result, if
 * copied, is guaranteed to be terminated with '\0'.  Note that *buf
 * must be terminated with '\0' on entry.
 *
 * Returns the length of the token found (not including the '\0').
 * Return value will be 0 if no token is found, and it will be >=
 * token_size if the token would not fit.
 *
 * The *buf pointer will be updated to point beyond the end of the
 * found token.  Note that this occurs even if the token buffer is
 * too small to hold it.
 */
static inline size_t copy_token(const char **buf,
				char *token,
				size_t token_size)
{
	size_t len;

	len = next_token(buf);
	if (len < token_size) {
		memcpy(token, *buf, len);
		*(token + len) = '\0';
	}
	*buf += len;

	return len;
}
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}

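/*
 * A minimal usage sketch for the two helpers above, pulling two
 * whitespace-separated tokens out of a buffer (NULL checks omitted
 * for brevity):
 *
 *	const char *buf = "mypool myimage";
 *	char *pool = dup_token(&buf, NULL);	(duplicates "mypool")
 *	char *image = dup_token(&buf, NULL);	(duplicates "myimage")
 *
 * Both results must eventually be released with kfree().
 */
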
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *      Address of an rbd options pointer.  Fully initialized by
 *      this function; caller must release with kfree().
 *  spec
 *      Address of an rbd image specification pointer.  Fully
 *      initialized by this function based on parsed options.
 *      Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}

/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret > sizeof (__le32)) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
		if (!ret)
			rbd_dev->image_format = 2;
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}

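/*
 * Concretely: probing an image named "myimage" reads the object
 * called RBD_ID_PREFIX "myimage" ("rbd_id.myimage", assuming the
 * usual rbd_types.h definitions).  If that object is absent the
 * image is recorded as format 1 with an empty ("") image_id;
 * otherwise the string returned by the "get_id" class method
 * becomes the image_id and the image is format 2.
 */
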
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * calls.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	/* Drop parent reference unless it's already been done (or none) */

	if (rbd_dev->parent_overlap)
		rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}

static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}

static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent, false);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);

	return 0;
out_err:
	if (parent) {
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}

	return ret;
}

static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* generate unique id: find highest unique id, add one */
	rbd_dev_id_get(rbd_dev);

	/* Fill in the device name, now that we have its id. */
	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Get our block major device number. */

	ret = register_blkdev(0, rbd_dev->name);
	if (ret < 0)
		goto err_out_id;
	rbd_dev->major = ret;

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;
	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	return ret;
}

static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);

	return 0;
}

static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;
	int tmp;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev, true);
		if (ret)
			goto out_header_name;
	}

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_header_info(rbd_dev);
	else
		ret = rbd_dev_v2_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_probe;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);

	return 0;
err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping) {
		tmp = rbd_dev_header_watch_sync(rbd_dev, false);
		if (tmp)
			rbd_warn(rbd_dev, "unable to tear down "
					"watch request (%d)\n", tmp);
	}
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);

	return ret;
}

static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}

static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev->major = 0;
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}

static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	unsigned long ul;
	int dev_id;
	ssize_t ret;
	bool already = false;

	ret = strict_strtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		goto done;

	ret = rbd_dev_header_watch_sync(rbd_dev, false);
	if (ret)
		rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);

	/*
	 * flush remaining watch callbacks - these must be complete
	 * before the osd_client is shutdown
	 */
	dout("%s: flushing notifies", __func__);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed.  Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);
	ret = count;
done:
	mutex_unlock(&ctl_mutex);

	return ret;
}

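/*
 * From user space, the device mapped as /dev/rbd<id> is unmapped by
 * writing its id back to the bus control file, e.g.:
 *
 *	# echo 1 > /sys/bus/rbd/remove
 *
 * The write fails with -EBUSY while the device is still open, and
 * with -ENOENT if no mapped device has the given id.
 */
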
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}

static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}
	rc = rbd_slab_init();
	if (rc)
		return rc;
	rc = rbd_sysfs_init();
	if (rc)
		rbd_slab_exit();
	else
		pr_info("loaded " RBD_DRV_NAME_LONG "\n");

	return rc;
}

static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");