rbd: require stable pages if message data CRCs are enabled
drivers/block/rbd.c
1
2 /*
3 rbd.c -- Export ceph rados objects as a Linux block device
4
5
6 based on drivers/block/osdblk.c:
7
8 Copyright 2009 Red Hat, Inc.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING. If not, write to
21 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
22
23
24
25 For usage instructions, please refer to:
26
27 Documentation/ABI/testing/sysfs-bus-rbd
28
29 */
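/*
 * Illustrative sketch (not part of the original file): the sysfs
 * interface referenced above is typically driven like this, with the
 * monitor address, credentials, pool and image name as placeholders:
 *
 *   echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage" > /sys/bus/rbd/add
 *   echo 0 > /sys/bus/rbd/remove        (unmaps device id 0)
 */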
30
31 #include <linux/ceph/libceph.h>
32 #include <linux/ceph/osd_client.h>
33 #include <linux/ceph/mon_client.h>
34 #include <linux/ceph/decode.h>
35 #include <linux/parser.h>
36 #include <linux/bsearch.h>
37
38 #include <linux/kernel.h>
39 #include <linux/device.h>
40 #include <linux/module.h>
41 #include <linux/fs.h>
42 #include <linux/blkdev.h>
43 #include <linux/slab.h>
44
45 #include "rbd_types.h"
46
47 #define RBD_DEBUG /* Activate rbd_assert() calls */
48
49 /*
50 * The basic unit of block I/O is a sector. It is interpreted in a
51 * number of contexts in Linux (blk, bio, genhd), but the default is
52 * universally 512 bytes. These symbols are just slightly more
53 * meaningful than the bare numbers they represent.
54 */
55 #define SECTOR_SHIFT 9
56 #define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
57
58 /*
59 * Increment the given counter and return its updated value.
60 * If the counter is already 0, it will not be incremented.
61 * If the counter is already at its maximum value, this returns
62 * -EINVAL without updating it.
63 */
64 static int atomic_inc_return_safe(atomic_t *v)
65 {
66 unsigned int counter;
67
68 counter = (unsigned int)__atomic_add_unless(v, 1, 0);
69 if (counter <= (unsigned int)INT_MAX)
70 return (int)counter;
71
72 atomic_dec(v);
73
74 return -EINVAL;
75 }
76
77 /* Decrement the counter. Return the resulting value, or -EINVAL */
78 static int atomic_dec_return_safe(atomic_t *v)
79 {
80 int counter;
81
82 counter = atomic_dec_return(v);
83 if (counter >= 0)
84 return counter;
85
86 atomic_inc(v);
87
88 return -EINVAL;
89 }
90
91 #define RBD_DRV_NAME "rbd"
92 #define RBD_DRV_NAME_LONG "rbd (rados block device)"
93
94 #define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
95
96 #define RBD_SNAP_DEV_NAME_PREFIX "snap_"
97 #define RBD_MAX_SNAP_NAME_LEN \
98 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
99
100 #define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
101
102 #define RBD_SNAP_HEAD_NAME "-"
103
104 #define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */
105
106 /* This allows a single page to hold an image name sent by OSD */
107 #define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
108 #define RBD_IMAGE_ID_LEN_MAX 64
109
110 #define RBD_OBJ_PREFIX_LEN_MAX 64
111
112 /* Feature bits */
113
114 #define RBD_FEATURE_LAYERING (1<<0)
115 #define RBD_FEATURE_STRIPINGV2 (1<<1)
116 #define RBD_FEATURES_ALL \
117 (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)
118
119 /* Features supported by this (client software) implementation. */
120
121 #define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL)
122
123 /*
124 * An RBD device name will be "rbd#", where the "rbd" comes from
125 * RBD_DRV_NAME above, and # is a unique integer identifier.
126 * MAX_INT_FORMAT_WIDTH is used to ensure DEV_NAME_LEN is big
127 * enough to hold all possible device names.
128 */
129 #define DEV_NAME_LEN 32
130 #define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1)
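/*
 * Worked example (added for illustration): with 4-byte ints,
 * MAX_INT_FORMAT_WIDTH is (5 * 4) / 2 + 1 == 11, enough for the ten
 * decimal digits of INT_MAX plus a sign character.
 */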
131
132 /*
133 * block device image metadata (in-memory version)
134 */
135 struct rbd_image_header {
136 /* These six fields never change for a given rbd image */
137 char *object_prefix;
138 __u8 obj_order;
139 __u8 crypt_type;
140 __u8 comp_type;
141 u64 stripe_unit;
142 u64 stripe_count;
143 u64 features; /* Might be changeable someday? */
144
145 /* The remaining fields need to be updated occasionally */
146 u64 image_size;
147 struct ceph_snap_context *snapc;
148 char *snap_names; /* format 1 only */
149 u64 *snap_sizes; /* format 1 only */
150 };
151
152 /*
153 * An rbd image specification.
154 *
155 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
156 * identify an image. Each rbd_dev structure includes a pointer to
157 * an rbd_spec structure that encapsulates this identity.
158 *
159 * Each of the ids in an rbd_spec has an associated name. For a
160 * user-mapped image, the names are supplied and the ids associated
161 * with them are looked up. For a layered image, a parent image is
162 * defined by the tuple, and the names are looked up.
163 *
164 * An rbd_dev structure contains a parent_spec pointer which is
165 * non-null if the image it represents is a child in a layered
166 * image. This pointer will refer to the rbd_spec structure used
167 * by the parent rbd_dev for its own identity (i.e., the structure
168 * is shared between the parent and child).
169 *
170 * Since these structures are populated once, during the discovery
171 * phase of image construction, they are effectively immutable so
172 * we make no effort to synchronize access to them.
173 *
174 * Note that code herein does not assume the image name is known (it
175 * could be a null pointer).
176 */
177 struct rbd_spec {
178 u64 pool_id;
179 const char *pool_name;
180
181 const char *image_id;
182 const char *image_name;
183
184 u64 snap_id;
185 const char *snap_name;
186
187 struct kref kref;
188 };
189
190 /*
191 * An instance of the client. Multiple devices may share an rbd client.
192 */
193 struct rbd_client {
194 struct ceph_client *client;
195 struct kref kref;
196 struct list_head node;
197 };
198
199 struct rbd_img_request;
200 typedef void (*rbd_img_callback_t)(struct rbd_img_request *);
201
202 #define BAD_WHICH U32_MAX /* Good which or bad which, which? */
203
204 struct rbd_obj_request;
205 typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
206
207 enum obj_request_type {
208 OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
209 };
210
211 enum obj_req_flags {
212 OBJ_REQ_DONE, /* completion flag: not done = 0, done = 1 */
213 OBJ_REQ_IMG_DATA, /* object usage: standalone = 0, image = 1 */
214 OBJ_REQ_KNOWN, /* EXISTS flag valid: no = 0, yes = 1 */
215 OBJ_REQ_EXISTS, /* target exists: no = 0, yes = 1 */
216 };
217
218 struct rbd_obj_request {
219 const char *object_name;
220 u64 offset; /* object start byte */
221 u64 length; /* bytes from offset */
222 unsigned long flags;
223
224 /*
225 * An object request associated with an image will have its
226 * img_data flag set; a standalone object request will not.
227 *
228 * A standalone object request will have which == BAD_WHICH
229 * and a null obj_request pointer.
230 *
231 * An object request initiated in support of a layered image
232 * object (to check for its existence before a write) will
233 * have which == BAD_WHICH and a non-null obj_request pointer.
234 *
235 * Finally, an object request for rbd image data will have
236 * which != BAD_WHICH, and will have a non-null img_request
237 * pointer. The value of which will be in the range
238 * 0..(img_request->obj_request_count-1).
239 */
240 union {
241 struct rbd_obj_request *obj_request; /* STAT op */
242 struct {
243 struct rbd_img_request *img_request;
244 u64 img_offset;
245 /* links for img_request->obj_requests list */
246 struct list_head links;
247 };
248 };
249 u32 which; /* posn in image request list */
250
251 enum obj_request_type type;
252 union {
253 struct bio *bio_list;
254 struct {
255 struct page **pages;
256 u32 page_count;
257 };
258 };
259 struct page **copyup_pages;
260 u32 copyup_page_count;
261
262 struct ceph_osd_request *osd_req;
263
264 u64 xferred; /* bytes transferred */
265 int result;
266
267 rbd_obj_callback_t callback;
268 struct completion completion;
269
270 struct kref kref;
271 };
272
273 enum img_req_flags {
274 IMG_REQ_WRITE, /* I/O direction: read = 0, write = 1 */
275 IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */
276 IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */
277 };
278
279 struct rbd_img_request {
280 struct rbd_device *rbd_dev;
281 u64 offset; /* starting image byte offset */
282 u64 length; /* byte count from offset */
283 unsigned long flags;
284 union {
285 u64 snap_id; /* for reads */
286 struct ceph_snap_context *snapc; /* for writes */
287 };
288 union {
289 struct request *rq; /* block request */
290 struct rbd_obj_request *obj_request; /* obj req initiator */
291 };
292 struct page **copyup_pages;
293 u32 copyup_page_count;
294 spinlock_t completion_lock;/* protects next_completion */
295 u32 next_completion;
296 rbd_img_callback_t callback;
297 u64 xferred;/* aggregate bytes transferred */
298 int result; /* first nonzero obj_request result */
299
300 u32 obj_request_count;
301 struct list_head obj_requests; /* rbd_obj_request structs */
302
303 struct kref kref;
304 };
305
306 #define for_each_obj_request(ireq, oreq) \
307 list_for_each_entry(oreq, &(ireq)->obj_requests, links)
308 #define for_each_obj_request_from(ireq, oreq) \
309 list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
310 #define for_each_obj_request_safe(ireq, oreq, n) \
311 list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
312
313 struct rbd_mapping {
314 u64 size;
315 u64 features;
316 bool read_only;
317 };
318
319 /*
320 * a single device
321 */
322 struct rbd_device {
323 int dev_id; /* blkdev unique id */
324
325 int major; /* blkdev assigned major */
326 struct gendisk *disk; /* blkdev's gendisk and rq */
327
328 u32 image_format; /* Either 1 or 2 */
329 struct rbd_client *rbd_client;
330
331 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
332
333 spinlock_t lock; /* queue, flags, open_count */
334
335 struct rbd_image_header header;
336 unsigned long flags; /* possibly lock protected */
337 struct rbd_spec *spec;
338
339 char *header_name;
340
341 struct ceph_file_layout layout;
342
343 struct ceph_osd_event *watch_event;
344 struct rbd_obj_request *watch_request;
345
346 struct rbd_spec *parent_spec;
347 u64 parent_overlap;
348 atomic_t parent_ref;
349 struct rbd_device *parent;
350
351 /* protects updating the header */
352 struct rw_semaphore header_rwsem;
353
354 struct rbd_mapping mapping;
355
356 struct list_head node;
357
358 /* sysfs related */
359 struct device dev;
360 unsigned long open_count; /* protected by lock */
361 };
362
363 /*
364 * Flag bits for rbd_dev->flags. If atomicity is required,
365 * rbd_dev->lock is used to protect access.
366 *
367 * Currently, only the "removing" flag (which is coupled with the
368 * "open_count" field) requires atomic access.
369 */
370 enum rbd_dev_flags {
371 RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
372 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
373 };
374
375 static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
376
377 static LIST_HEAD(rbd_dev_list); /* devices */
378 static DEFINE_SPINLOCK(rbd_dev_list_lock);
379
380 static LIST_HEAD(rbd_client_list); /* clients */
381 static DEFINE_SPINLOCK(rbd_client_list_lock);
382
383 /* Slab caches for frequently-allocated structures */
384
385 static struct kmem_cache *rbd_img_request_cache;
386 static struct kmem_cache *rbd_obj_request_cache;
387 static struct kmem_cache *rbd_segment_name_cache;
388
389 static int rbd_img_request_submit(struct rbd_img_request *img_request);
390
391 static void rbd_dev_device_release(struct device *dev);
392
393 static ssize_t rbd_add(struct bus_type *bus, const char *buf,
394 size_t count);
395 static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
396 size_t count);
397 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
398 static void rbd_spec_put(struct rbd_spec *spec);
399
400 static struct bus_attribute rbd_bus_attrs[] = {
401 __ATTR(add, S_IWUSR, NULL, rbd_add),
402 __ATTR(remove, S_IWUSR, NULL, rbd_remove),
403 __ATTR_NULL
404 };
405
406 static struct bus_type rbd_bus_type = {
407 .name = "rbd",
408 .bus_attrs = rbd_bus_attrs,
409 };
410
411 static void rbd_root_dev_release(struct device *dev)
412 {
413 }
414
415 static struct device rbd_root_dev = {
416 .init_name = "rbd",
417 .release = rbd_root_dev_release,
418 };
419
420 static __printf(2, 3)
421 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
422 {
423 struct va_format vaf;
424 va_list args;
425
426 va_start(args, fmt);
427 vaf.fmt = fmt;
428 vaf.va = &args;
429
430 if (!rbd_dev)
431 printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
432 else if (rbd_dev->disk)
433 printk(KERN_WARNING "%s: %s: %pV\n",
434 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
435 else if (rbd_dev->spec && rbd_dev->spec->image_name)
436 printk(KERN_WARNING "%s: image %s: %pV\n",
437 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
438 else if (rbd_dev->spec && rbd_dev->spec->image_id)
439 printk(KERN_WARNING "%s: id %s: %pV\n",
440 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
441 else /* punt */
442 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
443 RBD_DRV_NAME, rbd_dev, &vaf);
444 va_end(args);
445 }
446
447 #ifdef RBD_DEBUG
448 #define rbd_assert(expr) \
449 if (unlikely(!(expr))) { \
450 printk(KERN_ERR "\nAssertion failure in %s() " \
451 "at line %d:\n\n" \
452 "\trbd_assert(%s);\n\n", \
453 __func__, __LINE__, #expr); \
454 BUG(); \
455 }
456 #else /* !RBD_DEBUG */
457 # define rbd_assert(expr) ((void) 0)
458 #endif /* !RBD_DEBUG */
459
460 static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
461 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
462 static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
463 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
464
465 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
466 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
467 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
468 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
469 u64 snap_id);
470 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
471 u8 *order, u64 *snap_size);
472 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
473 u64 *snap_features);
474 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
475
476 static int rbd_open(struct block_device *bdev, fmode_t mode)
477 {
478 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
479 bool removing = false;
480
481 if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
482 return -EROFS;
483
484 spin_lock_irq(&rbd_dev->lock);
485 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
486 removing = true;
487 else
488 rbd_dev->open_count++;
489 spin_unlock_irq(&rbd_dev->lock);
490 if (removing)
491 return -ENOENT;
492
493 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
494 (void) get_device(&rbd_dev->dev);
495 set_device_ro(bdev, rbd_dev->mapping.read_only);
496 mutex_unlock(&ctl_mutex);
497
498 return 0;
499 }
500
501 static void rbd_release(struct gendisk *disk, fmode_t mode)
502 {
503 struct rbd_device *rbd_dev = disk->private_data;
504 unsigned long open_count_before;
505
506 spin_lock_irq(&rbd_dev->lock);
507 open_count_before = rbd_dev->open_count--;
508 spin_unlock_irq(&rbd_dev->lock);
509 rbd_assert(open_count_before > 0);
510
511 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
512 put_device(&rbd_dev->dev);
513 mutex_unlock(&ctl_mutex);
514 }
515
516 static const struct block_device_operations rbd_bd_ops = {
517 .owner = THIS_MODULE,
518 .open = rbd_open,
519 .release = rbd_release,
520 };
521
522 /*
523 * Initialize an rbd client instance. Success or not, this function
524 * consumes ceph_opts.
525 */
526 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
527 {
528 struct rbd_client *rbdc;
529 int ret = -ENOMEM;
530
531 dout("%s:\n", __func__);
532 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
533 if (!rbdc)
534 goto out_opt;
535
536 kref_init(&rbdc->kref);
537 INIT_LIST_HEAD(&rbdc->node);
538
539 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
540
541 rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
542 if (IS_ERR(rbdc->client))
543 goto out_mutex;
544 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
545
546 ret = ceph_open_session(rbdc->client);
547 if (ret < 0)
548 goto out_err;
549
550 spin_lock(&rbd_client_list_lock);
551 list_add_tail(&rbdc->node, &rbd_client_list);
552 spin_unlock(&rbd_client_list_lock);
553
554 mutex_unlock(&ctl_mutex);
555 dout("%s: rbdc %p\n", __func__, rbdc);
556
557 return rbdc;
558
559 out_err:
560 ceph_destroy_client(rbdc->client);
561 out_mutex:
562 mutex_unlock(&ctl_mutex);
563 kfree(rbdc);
564 out_opt:
565 if (ceph_opts)
566 ceph_destroy_options(ceph_opts);
567 dout("%s: error %d\n", __func__, ret);
568
569 return ERR_PTR(ret);
570 }
571
572 static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
573 {
574 kref_get(&rbdc->kref);
575
576 return rbdc;
577 }
578
579 /*
580 * Find a ceph client with specific addr and configuration. If
581 * found, bump its reference count.
582 */
583 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
584 {
585 struct rbd_client *client_node;
586 bool found = false;
587
588 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
589 return NULL;
590
591 spin_lock(&rbd_client_list_lock);
592 list_for_each_entry(client_node, &rbd_client_list, node) {
593 if (!ceph_compare_options(ceph_opts, client_node->client)) {
594 __rbd_get_client(client_node);
595
596 found = true;
597 break;
598 }
599 }
600 spin_unlock(&rbd_client_list_lock);
601
602 return found ? client_node : NULL;
603 }
604
605 /*
606 * mount options
607 */
608 enum {
609 Opt_last_int,
610 /* int args above */
611 Opt_last_string,
612 /* string args above */
613 Opt_read_only,
614 Opt_read_write,
615 /* Boolean args above */
616 Opt_last_bool,
617 };
618
619 static match_table_t rbd_opts_tokens = {
620 /* int args above */
621 /* string args above */
622 {Opt_read_only, "read_only"},
623 {Opt_read_only, "ro"}, /* Alternate spelling */
624 {Opt_read_write, "read_write"},
625 {Opt_read_write, "rw"}, /* Alternate spelling */
626 /* Boolean args above */
627 {-1, NULL}
628 };
629
630 struct rbd_options {
631 bool read_only;
632 };
633
634 #define RBD_READ_ONLY_DEFAULT false
635
636 static int parse_rbd_opts_token(char *c, void *private)
637 {
638 struct rbd_options *rbd_opts = private;
639 substring_t argstr[MAX_OPT_ARGS];
640 int token, intval, ret;
641
642 token = match_token(c, rbd_opts_tokens, argstr);
643 if (token < 0)
644 return -EINVAL;
645
646 if (token < Opt_last_int) {
647 ret = match_int(&argstr[0], &intval);
648 if (ret < 0) {
649 pr_err("bad mount option arg (not int) "
650 "at '%s'\n", c);
651 return ret;
652 }
653 dout("got int token %d val %d\n", token, intval);
654 } else if (token > Opt_last_int && token < Opt_last_string) {
655 dout("got string token %d val %s\n", token,
656 argstr[0].from);
657 } else if (token > Opt_last_string && token < Opt_last_bool) {
658 dout("got Boolean token %d\n", token);
659 } else {
660 dout("got token %d\n", token);
661 }
662
663 switch (token) {
664 case Opt_read_only:
665 rbd_opts->read_only = true;
666 break;
667 case Opt_read_write:
668 rbd_opts->read_only = false;
669 break;
670 default:
671 rbd_assert(false);
672 break;
673 }
674 return 0;
675 }
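/*
 * Illustrative example (not part of the original file): an option
 * string such as "read_only" (or its "ro" alias) passed when mapping
 * an image is matched above and sets rbd_opts->read_only to true;
 * "read_write"/"rw" clears it again.
 */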
676
677 /*
678 * Get a ceph client with a specific addr and configuration; if one
679 * does not exist, create it. Either way, ceph_opts is consumed by this
680 * function.
681 */
682 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
683 {
684 struct rbd_client *rbdc;
685
686 rbdc = rbd_client_find(ceph_opts);
687 if (rbdc) /* using an existing client */
688 ceph_destroy_options(ceph_opts);
689 else
690 rbdc = rbd_client_create(ceph_opts);
691
692 return rbdc;
693 }
694
695 /*
696 * Destroy ceph client
697 *
698 * Caller must hold rbd_client_list_lock.
699 */
700 static void rbd_client_release(struct kref *kref)
701 {
702 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
703
704 dout("%s: rbdc %p\n", __func__, rbdc);
705 spin_lock(&rbd_client_list_lock);
706 list_del(&rbdc->node);
707 spin_unlock(&rbd_client_list_lock);
708
709 ceph_destroy_client(rbdc->client);
710 kfree(rbdc);
711 }
712
713 /*
714 * Drop reference to ceph client node. If it's not referenced anymore, release
715 * it.
716 */
717 static void rbd_put_client(struct rbd_client *rbdc)
718 {
719 if (rbdc)
720 kref_put(&rbdc->kref, rbd_client_release);
721 }
722
723 static bool rbd_image_format_valid(u32 image_format)
724 {
725 return image_format == 1 || image_format == 2;
726 }
727
728 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
729 {
730 size_t size;
731 u32 snap_count;
732
733 /* The header has to start with the magic rbd header text */
734 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
735 return false;
736
737 /* The bio layer requires at least sector-sized I/O */
738
739 if (ondisk->options.order < SECTOR_SHIFT)
740 return false;
741
742 /* If we use u64 in a few spots we may be able to loosen this */
743
744 if (ondisk->options.order > 8 * sizeof (int) - 1)
745 return false;
746
747 /*
748 * The size of a snapshot header has to fit in a size_t, and
749 * that limits the number of snapshots.
750 */
751 snap_count = le32_to_cpu(ondisk->snap_count);
752 size = SIZE_MAX - sizeof (struct ceph_snap_context);
753 if (snap_count > size / sizeof (__le64))
754 return false;
755
756 /*
757 * Not only that, but the size of the entire snapshot
758 * header must also be representable in a size_t.
759 */
760 size -= snap_count * sizeof (__le64);
761 if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
762 return false;
763
764 return true;
765 }
766
767 /*
768 * Fill an rbd image header with information from the given format 1
769 * on-disk header.
770 */
771 static int rbd_header_from_disk(struct rbd_device *rbd_dev,
772 struct rbd_image_header_ondisk *ondisk)
773 {
774 struct rbd_image_header *header = &rbd_dev->header;
775 bool first_time = header->object_prefix == NULL;
776 struct ceph_snap_context *snapc;
777 char *object_prefix = NULL;
778 char *snap_names = NULL;
779 u64 *snap_sizes = NULL;
780 u32 snap_count;
781 size_t size;
782 int ret = -ENOMEM;
783 u32 i;
784
785 /* Allocate this now to avoid having to handle failure below */
786
787 if (first_time) {
788 size_t len;
789
790 len = strnlen(ondisk->object_prefix,
791 sizeof (ondisk->object_prefix));
792 object_prefix = kmalloc(len + 1, GFP_KERNEL);
793 if (!object_prefix)
794 return -ENOMEM;
795 memcpy(object_prefix, ondisk->object_prefix, len);
796 object_prefix[len] = '\0';
797 }
798
799 /* Allocate the snapshot context and fill it in */
800
801 snap_count = le32_to_cpu(ondisk->snap_count);
802 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
803 if (!snapc)
804 goto out_err;
805 snapc->seq = le64_to_cpu(ondisk->snap_seq);
806 if (snap_count) {
807 struct rbd_image_snap_ondisk *snaps;
808 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
809
810 /* We'll keep a copy of the snapshot names... */
811
812 if (snap_names_len > (u64)SIZE_MAX)
813 goto out_2big;
814 snap_names = kmalloc(snap_names_len, GFP_KERNEL);
815 if (!snap_names)
816 goto out_err;
817
818 /* ...as well as the array of their sizes. */
819
820 size = snap_count * sizeof (*header->snap_sizes);
821 snap_sizes = kmalloc(size, GFP_KERNEL);
822 if (!snap_sizes)
823 goto out_err;
824
825 /*
826 * Copy the names, and fill in each snapshot's id
827 * and size.
828 *
829 * Note that rbd_dev_v1_header_info() guarantees the
830 * ondisk buffer we're working with has
831 * snap_names_len bytes beyond the end of the
832 * snapshot id array, so this memcpy() is safe.
833 */
834 memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
835 snaps = ondisk->snaps;
836 for (i = 0; i < snap_count; i++) {
837 snapc->snaps[i] = le64_to_cpu(snaps[i].id);
838 snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
839 }
840 }
841
842 /* We won't fail any more, fill in the header */
843
844 down_write(&rbd_dev->header_rwsem);
845 if (first_time) {
846 header->object_prefix = object_prefix;
847 header->obj_order = ondisk->options.order;
848 header->crypt_type = ondisk->options.crypt_type;
849 header->comp_type = ondisk->options.comp_type;
850 /* The rest aren't used for format 1 images */
851 header->stripe_unit = 0;
852 header->stripe_count = 0;
853 header->features = 0;
854 } else {
855 ceph_put_snap_context(header->snapc);
856 kfree(header->snap_names);
857 kfree(header->snap_sizes);
858 }
859
860 /* The remaining fields always get updated (when we refresh) */
861
862 header->image_size = le64_to_cpu(ondisk->image_size);
863 header->snapc = snapc;
864 header->snap_names = snap_names;
865 header->snap_sizes = snap_sizes;
866
867 /* Make sure mapping size is consistent with header info */
868
869 if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
870 if (rbd_dev->mapping.size != header->image_size)
871 rbd_dev->mapping.size = header->image_size;
872
873 up_write(&rbd_dev->header_rwsem);
874
875 return 0;
876 out_2big:
877 ret = -EIO;
878 out_err:
879 kfree(snap_sizes);
880 kfree(snap_names);
881 ceph_put_snap_context(snapc);
882 kfree(object_prefix);
883
884 return ret;
885 }
886
887 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
888 {
889 const char *snap_name;
890
891 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
892
893 /* Skip over names until we find the one we are looking for */
894
895 snap_name = rbd_dev->header.snap_names;
896 while (which--)
897 snap_name += strlen(snap_name) + 1;
898
899 return kstrdup(snap_name, GFP_KERNEL);
900 }
901
902 /*
903 * Snapshot id comparison function for use with qsort()/bsearch().
904 * Note that the result is for snapshots in *descending* order.
905 */
906 static int snapid_compare_reverse(const void *s1, const void *s2)
907 {
908 u64 snap_id1 = *(u64 *)s1;
909 u64 snap_id2 = *(u64 *)s2;
910
911 if (snap_id1 < snap_id2)
912 return 1;
913 return snap_id1 == snap_id2 ? 0 : -1;
914 }
915
916 /*
917 * Search a snapshot context to see if the given snapshot id is
918 * present.
919 *
920 * Returns the position of the snapshot id in the array if it's found,
921 * or BAD_SNAP_INDEX otherwise.
922 *
923 * Note: The snapshot array is kept sorted (by the osd) in
924 * reverse order, highest snapshot id first.
925 */
926 static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
927 {
928 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
929 u64 *found;
930
931 found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
932 sizeof (snap_id), snapid_compare_reverse);
933
934 return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
935 }
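/*
 * Illustrative example (not part of the original file): if snaps[]
 * holds ids { 12, 7, 3 } (newest first), a lookup for snap_id 7
 * returns index 1, while a lookup for snap_id 5 returns BAD_SNAP_INDEX.
 */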
936
937 static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
938 u64 snap_id)
939 {
940 u32 which;
941 const char *snap_name;
942
943 which = rbd_dev_snap_index(rbd_dev, snap_id);
944 if (which == BAD_SNAP_INDEX)
945 return ERR_PTR(-ENOENT);
946
947 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
948 return snap_name ? snap_name : ERR_PTR(-ENOMEM);
949 }
950
951 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
952 {
953 if (snap_id == CEPH_NOSNAP)
954 return RBD_SNAP_HEAD_NAME;
955
956 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
957 if (rbd_dev->image_format == 1)
958 return rbd_dev_v1_snap_name(rbd_dev, snap_id);
959
960 return rbd_dev_v2_snap_name(rbd_dev, snap_id);
961 }
962
963 static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
964 u64 *snap_size)
965 {
966 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
967 if (snap_id == CEPH_NOSNAP) {
968 *snap_size = rbd_dev->header.image_size;
969 } else if (rbd_dev->image_format == 1) {
970 u32 which;
971
972 which = rbd_dev_snap_index(rbd_dev, snap_id);
973 if (which == BAD_SNAP_INDEX)
974 return -ENOENT;
975
976 *snap_size = rbd_dev->header.snap_sizes[which];
977 } else {
978 u64 size = 0;
979 int ret;
980
981 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
982 if (ret)
983 return ret;
984
985 *snap_size = size;
986 }
987 return 0;
988 }
989
990 static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
991 u64 *snap_features)
992 {
993 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
994 if (snap_id == CEPH_NOSNAP) {
995 *snap_features = rbd_dev->header.features;
996 } else if (rbd_dev->image_format == 1) {
997 *snap_features = 0; /* No features for format 1 */
998 } else {
999 u64 features = 0;
1000 int ret;
1001
1002 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
1003 if (ret)
1004 return ret;
1005
1006 *snap_features = features;
1007 }
1008 return 0;
1009 }
1010
1011 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
1012 {
1013 u64 snap_id = rbd_dev->spec->snap_id;
1014 u64 size = 0;
1015 u64 features = 0;
1016 int ret;
1017
1018 ret = rbd_snap_size(rbd_dev, snap_id, &size);
1019 if (ret)
1020 return ret;
1021 ret = rbd_snap_features(rbd_dev, snap_id, &features);
1022 if (ret)
1023 return ret;
1024
1025 rbd_dev->mapping.size = size;
1026 rbd_dev->mapping.features = features;
1027
1028 return 0;
1029 }
1030
1031 static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1032 {
1033 rbd_dev->mapping.size = 0;
1034 rbd_dev->mapping.features = 0;
1035 }
1036
1037 static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
1038 {
1039 char *name;
1040 u64 segment;
1041 int ret;
1042 char *name_format;
1043
1044 name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
1045 if (!name)
1046 return NULL;
1047 segment = offset >> rbd_dev->header.obj_order;
1048 name_format = "%s.%012llx";
1049 if (rbd_dev->image_format == 2)
1050 name_format = "%s.%016llx";
1051 ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
1052 rbd_dev->header.object_prefix, segment);
1053 if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
1054 pr_err("error formatting segment name for #%llu (%d)\n",
1055 segment, ret);
1056 kfree(name);
1057 name = NULL;
1058 }
1059
1060 return name;
1061 }
1062
1063 static void rbd_segment_name_free(const char *name)
1064 {
1065 /* The explicit cast here is needed to drop the const qualifier */
1066
1067 kmem_cache_free(rbd_segment_name_cache, (void *)name);
1068 }
1069
1070 static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
1071 {
1072 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1073
1074 return offset & (segment_size - 1);
1075 }
1076
1077 static u64 rbd_segment_length(struct rbd_device *rbd_dev,
1078 u64 offset, u64 length)
1079 {
1080 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1081
1082 offset &= segment_size - 1;
1083
1084 rbd_assert(length <= U64_MAX - offset);
1085 if (offset + length > segment_size)
1086 length = segment_size - offset;
1087
1088 return length;
1089 }
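/*
 * Worked example (added for illustration): with the common obj_order
 * of 22 (4 MiB objects), image offset 0xc20000 falls in segment
 * 0xc20000 >> 22 == 3, at offset 0xc20000 & (0x400000 - 1) == 0x20000
 * within that object.
 */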
1090
1091 /*
1092 * returns the size of an object in the image
1093 */
1094 static u64 rbd_obj_bytes(struct rbd_image_header *header)
1095 {
1096 return 1 << header->obj_order;
1097 }
1098
1099 /*
1100 * bio helpers
1101 */
1102
1103 static void bio_chain_put(struct bio *chain)
1104 {
1105 struct bio *tmp;
1106
1107 while (chain) {
1108 tmp = chain;
1109 chain = chain->bi_next;
1110 bio_put(tmp);
1111 }
1112 }
1113
1114 /*
1115 * Zeros a bio chain, starting at a specific offset
1116 */
1117 static void zero_bio_chain(struct bio *chain, int start_ofs)
1118 {
1119 struct bio_vec *bv;
1120 unsigned long flags;
1121 void *buf;
1122 int i;
1123 int pos = 0;
1124
1125 while (chain) {
1126 bio_for_each_segment(bv, chain, i) {
1127 if (pos + bv->bv_len > start_ofs) {
1128 int remainder = max(start_ofs - pos, 0);
1129 buf = bvec_kmap_irq(bv, &flags);
1130 memset(buf + remainder, 0,
1131 bv->bv_len - remainder);
1132 flush_dcache_page(bv->bv_page);
1133 bvec_kunmap_irq(buf, &flags);
1134 }
1135 pos += bv->bv_len;
1136 }
1137
1138 chain = chain->bi_next;
1139 }
1140 }
1141
1142 /*
1143 * Similar to zero_bio_chain(), this zeros data defined by a page array,
1144 * starting at the given byte offset from the start of the array and
1145 * continuing up to the given end offset. The pages array is
1146 * assumed to be big enough to hold all bytes up to the end.
1147 */
1148 static void zero_pages(struct page **pages, u64 offset, u64 end)
1149 {
1150 struct page **page = &pages[offset >> PAGE_SHIFT];
1151
1152 rbd_assert(end > offset);
1153 rbd_assert(end - offset <= (u64)SIZE_MAX);
1154 while (offset < end) {
1155 size_t page_offset;
1156 size_t length;
1157 unsigned long flags;
1158 void *kaddr;
1159
1160 page_offset = (size_t)(offset & ~PAGE_MASK);
1161 length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
1162 local_irq_save(flags);
1163 kaddr = kmap_atomic(*page);
1164 memset(kaddr + page_offset, 0, length);
1165 flush_dcache_page(*page);
1166 kunmap_atomic(kaddr);
1167 local_irq_restore(flags);
1168
1169 offset += length;
1170 page++;
1171 }
1172 }
1173
1174 /*
1175 * Clone a portion of a bio, starting at the given byte offset
1176 * and continuing for the number of bytes indicated.
1177 */
1178 static struct bio *bio_clone_range(struct bio *bio_src,
1179 unsigned int offset,
1180 unsigned int len,
1181 gfp_t gfpmask)
1182 {
1183 struct bio_vec *bv;
1184 unsigned int resid;
1185 unsigned short idx;
1186 unsigned int voff;
1187 unsigned short end_idx;
1188 unsigned short vcnt;
1189 struct bio *bio;
1190
1191 /* Handle the easy case for the caller */
1192
1193 if (!offset && len == bio_src->bi_size)
1194 return bio_clone(bio_src, gfpmask);
1195
1196 if (WARN_ON_ONCE(!len))
1197 return NULL;
1198 if (WARN_ON_ONCE(len > bio_src->bi_size))
1199 return NULL;
1200 if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
1201 return NULL;
1202
1203 /* Find first affected segment... */
1204
1205 resid = offset;
1206 bio_for_each_segment(bv, bio_src, idx) {
1207 if (resid < bv->bv_len)
1208 break;
1209 resid -= bv->bv_len;
1210 }
1211 voff = resid;
1212
1213 /* ...and the last affected segment */
1214
1215 resid += len;
1216 __bio_for_each_segment(bv, bio_src, end_idx, idx) {
1217 if (resid <= bv->bv_len)
1218 break;
1219 resid -= bv->bv_len;
1220 }
1221 vcnt = end_idx - idx + 1;
1222
1223 /* Build the clone */
1224
1225 bio = bio_alloc(gfpmask, (unsigned int) vcnt);
1226 if (!bio)
1227 return NULL; /* ENOMEM */
1228
1229 bio->bi_bdev = bio_src->bi_bdev;
1230 bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
1231 bio->bi_rw = bio_src->bi_rw;
1232 bio->bi_flags |= 1 << BIO_CLONED;
1233
1234 /*
1235 * Copy over our part of the bio_vec, then update the first
1236 * and last (or only) entries.
1237 */
1238 memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
1239 vcnt * sizeof (struct bio_vec));
1240 bio->bi_io_vec[0].bv_offset += voff;
1241 if (vcnt > 1) {
1242 bio->bi_io_vec[0].bv_len -= voff;
1243 bio->bi_io_vec[vcnt - 1].bv_len = resid;
1244 } else {
1245 bio->bi_io_vec[0].bv_len = len;
1246 }
1247
1248 bio->bi_vcnt = vcnt;
1249 bio->bi_size = len;
1250 bio->bi_idx = 0;
1251
1252 return bio;
1253 }
1254
1255 /*
1256 * Clone a portion of a bio chain, starting at the given byte offset
1257 * into the first bio in the source chain and continuing for the
1258 * number of bytes indicated. The result is another bio chain of
1259 * exactly the given length, or a null pointer on error.
1260 *
1261 * The bio_src and offset parameters are both in-out. On entry they
1262 * refer to the first source bio and the offset into that bio where
1263 * the start of data to be cloned is located.
1264 *
1265 * On return, bio_src is updated to refer to the bio in the source
1266 * chain that contains the first un-cloned byte, and *offset will
1267 * contain the offset of that byte within that bio.
1268 */
1269 static struct bio *bio_chain_clone_range(struct bio **bio_src,
1270 unsigned int *offset,
1271 unsigned int len,
1272 gfp_t gfpmask)
1273 {
1274 struct bio *bi = *bio_src;
1275 unsigned int off = *offset;
1276 struct bio *chain = NULL;
1277 struct bio **end;
1278
1279 /* Build up a chain of clone bios up to the limit */
1280
1281 if (!bi || off >= bi->bi_size || !len)
1282 return NULL; /* Nothing to clone */
1283
1284 end = &chain;
1285 while (len) {
1286 unsigned int bi_size;
1287 struct bio *bio;
1288
1289 if (!bi) {
1290 rbd_warn(NULL, "bio_chain exhausted with %u left", len);
1291 goto out_err; /* EINVAL; ran out of bio's */
1292 }
1293 bi_size = min_t(unsigned int, bi->bi_size - off, len);
1294 bio = bio_clone_range(bi, off, bi_size, gfpmask);
1295 if (!bio)
1296 goto out_err; /* ENOMEM */
1297
1298 *end = bio;
1299 end = &bio->bi_next;
1300
1301 off += bi_size;
1302 if (off == bi->bi_size) {
1303 bi = bi->bi_next;
1304 off = 0;
1305 }
1306 len -= bi_size;
1307 }
1308 *bio_src = bi;
1309 *offset = off;
1310
1311 return chain;
1312 out_err:
1313 bio_chain_put(chain);
1314
1315 return NULL;
1316 }
1317
1318 /*
1319 * The default/initial value for all object request flags is 0. For
1320 * each flag, once its value is set to 1 it is never reset to 0
1321 * again.
1322 */
1323 static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
1324 {
1325 if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
1326 struct rbd_device *rbd_dev;
1327
1328 rbd_dev = obj_request->img_request->rbd_dev;
1329 rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
1330 obj_request);
1331 }
1332 }
1333
1334 static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
1335 {
1336 smp_mb();
1337 return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
1338 }
1339
1340 static void obj_request_done_set(struct rbd_obj_request *obj_request)
1341 {
1342 if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
1343 struct rbd_device *rbd_dev = NULL;
1344
1345 if (obj_request_img_data_test(obj_request))
1346 rbd_dev = obj_request->img_request->rbd_dev;
1347 rbd_warn(rbd_dev, "obj_request %p already marked done\n",
1348 obj_request);
1349 }
1350 }
1351
1352 static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1353 {
1354 smp_mb();
1355 return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
1356 }
1357
1358 /*
1359 * This sets the KNOWN flag after (possibly) setting the EXISTS
1360 * flag. The latter is set based on the "exists" value provided.
1361 *
1362 * Note that for our purposes once an object exists it never goes
1363 * away again. It's possible that the responses from two existence
1364 * checks are separated by the creation of the target object, and
1365 * the first ("doesn't exist") response arrives *after* the second
1366 * ("does exist"). In that case we ignore the second one.
1367 */
1368 static void obj_request_existence_set(struct rbd_obj_request *obj_request,
1369 bool exists)
1370 {
1371 if (exists)
1372 set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
1373 set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
1374 smp_mb();
1375 }
1376
1377 static bool obj_request_known_test(struct rbd_obj_request *obj_request)
1378 {
1379 smp_mb();
1380 return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
1381 }
1382
1383 static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
1384 {
1385 smp_mb();
1386 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
1387 }
1388
1389 static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
1390 {
1391 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1392
1393 return obj_request->img_offset <
1394 round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
1395 }
1396
1397 static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1398 {
1399 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1400 atomic_read(&obj_request->kref.refcount));
1401 kref_get(&obj_request->kref);
1402 }
1403
1404 static void rbd_obj_request_destroy(struct kref *kref);
1405 static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1406 {
1407 rbd_assert(obj_request != NULL);
1408 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1409 atomic_read(&obj_request->kref.refcount));
1410 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1411 }
1412
1413 static void rbd_img_request_get(struct rbd_img_request *img_request)
1414 {
1415 dout("%s: img %p (was %d)\n", __func__, img_request,
1416 atomic_read(&img_request->kref.refcount));
1417 kref_get(&img_request->kref);
1418 }
1419
1420 static bool img_request_child_test(struct rbd_img_request *img_request);
1421 static void rbd_parent_request_destroy(struct kref *kref);
1422 static void rbd_img_request_destroy(struct kref *kref);
1423 static void rbd_img_request_put(struct rbd_img_request *img_request)
1424 {
1425 rbd_assert(img_request != NULL);
1426 dout("%s: img %p (was %d)\n", __func__, img_request,
1427 atomic_read(&img_request->kref.refcount));
1428 if (img_request_child_test(img_request))
1429 kref_put(&img_request->kref, rbd_parent_request_destroy);
1430 else
1431 kref_put(&img_request->kref, rbd_img_request_destroy);
1432 }
1433
1434 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1435 struct rbd_obj_request *obj_request)
1436 {
1437 rbd_assert(obj_request->img_request == NULL);
1438
1439 /* Image request now owns object's original reference */
1440 obj_request->img_request = img_request;
1441 obj_request->which = img_request->obj_request_count;
1442 rbd_assert(!obj_request_img_data_test(obj_request));
1443 obj_request_img_data_set(obj_request);
1444 rbd_assert(obj_request->which != BAD_WHICH);
1445 img_request->obj_request_count++;
1446 list_add_tail(&obj_request->links, &img_request->obj_requests);
1447 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1448 obj_request->which);
1449 }
1450
1451 static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1452 struct rbd_obj_request *obj_request)
1453 {
1454 rbd_assert(obj_request->which != BAD_WHICH);
1455
1456 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1457 obj_request->which);
1458 list_del(&obj_request->links);
1459 rbd_assert(img_request->obj_request_count > 0);
1460 img_request->obj_request_count--;
1461 rbd_assert(obj_request->which == img_request->obj_request_count);
1462 obj_request->which = BAD_WHICH;
1463 rbd_assert(obj_request_img_data_test(obj_request));
1464 rbd_assert(obj_request->img_request == img_request);
1465 obj_request->img_request = NULL;
1466 obj_request->callback = NULL;
1467 rbd_obj_request_put(obj_request);
1468 }
1469
1470 static bool obj_request_type_valid(enum obj_request_type type)
1471 {
1472 switch (type) {
1473 case OBJ_REQUEST_NODATA:
1474 case OBJ_REQUEST_BIO:
1475 case OBJ_REQUEST_PAGES:
1476 return true;
1477 default:
1478 return false;
1479 }
1480 }
1481
1482 static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1483 struct rbd_obj_request *obj_request)
1484 {
1485 dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);
1486
1487 return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1488 }
1489
1490 static void rbd_img_request_complete(struct rbd_img_request *img_request)
1491 {
1492
1493 dout("%s: img %p\n", __func__, img_request);
1494
1495 /*
1496 * If no error occurred, compute the aggregate transfer
1497 * count for the image request. We could instead use
1498 * atomic64_cmpxchg() to update it as each object request
1499 * completes; it's not clear offhand which way is better.
1500 */
1501 if (!img_request->result) {
1502 struct rbd_obj_request *obj_request;
1503 u64 xferred = 0;
1504
1505 for_each_obj_request(img_request, obj_request)
1506 xferred += obj_request->xferred;
1507 img_request->xferred = xferred;
1508 }
1509
1510 if (img_request->callback)
1511 img_request->callback(img_request);
1512 else
1513 rbd_img_request_put(img_request);
1514 }
1515
1516 /* Caller is responsible for rbd_obj_request_destroy(obj_request) */
1517
1518 static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1519 {
1520 dout("%s: obj %p\n", __func__, obj_request);
1521
1522 return wait_for_completion_interruptible(&obj_request->completion);
1523 }
1524
1525 /*
1526 * The default/initial value for all image request flags is 0. Each
1527 * is conditionally set to 1 at image request initialization time
1528 * and currently never changes thereafter.
1529 */
1530 static void img_request_write_set(struct rbd_img_request *img_request)
1531 {
1532 set_bit(IMG_REQ_WRITE, &img_request->flags);
1533 smp_mb();
1534 }
1535
1536 static bool img_request_write_test(struct rbd_img_request *img_request)
1537 {
1538 smp_mb();
1539 return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
1540 }
1541
1542 static void img_request_child_set(struct rbd_img_request *img_request)
1543 {
1544 set_bit(IMG_REQ_CHILD, &img_request->flags);
1545 smp_mb();
1546 }
1547
1548 static void img_request_child_clear(struct rbd_img_request *img_request)
1549 {
1550 clear_bit(IMG_REQ_CHILD, &img_request->flags);
1551 smp_mb();
1552 }
1553
1554 static bool img_request_child_test(struct rbd_img_request *img_request)
1555 {
1556 smp_mb();
1557 return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
1558 }
1559
1560 static void img_request_layered_set(struct rbd_img_request *img_request)
1561 {
1562 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1563 smp_mb();
1564 }
1565
1566 static void img_request_layered_clear(struct rbd_img_request *img_request)
1567 {
1568 clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1569 smp_mb();
1570 }
1571
1572 static bool img_request_layered_test(struct rbd_img_request *img_request)
1573 {
1574 smp_mb();
1575 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1576 }
1577
1578 static void
1579 rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1580 {
1581 u64 xferred = obj_request->xferred;
1582 u64 length = obj_request->length;
1583
1584 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1585 obj_request, obj_request->img_request, obj_request->result,
1586 xferred, length);
1587 /*
1588 * ENOENT means a hole in the image. We zero-fill the entire
1589 * length of the request. A short read also implies zero-fill
1590 * to the end of the request. An error requires the whole
1591 * length of the request to be reported finished with an error
1592 * to the block layer. In each case we update the xferred
1593 * count to indicate the whole request was satisfied.
1594 */
1595 rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
1596 if (obj_request->result == -ENOENT) {
1597 if (obj_request->type == OBJ_REQUEST_BIO)
1598 zero_bio_chain(obj_request->bio_list, 0);
1599 else
1600 zero_pages(obj_request->pages, 0, length);
1601 obj_request->result = 0;
1602 } else if (xferred < length && !obj_request->result) {
1603 if (obj_request->type == OBJ_REQUEST_BIO)
1604 zero_bio_chain(obj_request->bio_list, xferred);
1605 else
1606 zero_pages(obj_request->pages, xferred, length);
1607 }
1608 obj_request->xferred = length;
1609 obj_request_done_set(obj_request);
1610 }
1611
1612 static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1613 {
1614 dout("%s: obj %p cb %p\n", __func__, obj_request,
1615 obj_request->callback);
1616 if (obj_request->callback)
1617 obj_request->callback(obj_request);
1618 else
1619 complete_all(&obj_request->completion);
1620 }
1621
1622 static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
1623 {
1624 dout("%s: obj %p\n", __func__, obj_request);
1625 obj_request_done_set(obj_request);
1626 }
1627
1628 static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
1629 {
1630 struct rbd_img_request *img_request = NULL;
1631 struct rbd_device *rbd_dev = NULL;
1632 bool layered = false;
1633
1634 if (obj_request_img_data_test(obj_request)) {
1635 img_request = obj_request->img_request;
1636 layered = img_request && img_request_layered_test(img_request);
1637 rbd_dev = img_request->rbd_dev;
1638 }
1639
1640 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1641 obj_request, img_request, obj_request->result,
1642 obj_request->xferred, obj_request->length);
1643 if (layered && obj_request->result == -ENOENT &&
1644 obj_request->img_offset < rbd_dev->parent_overlap)
1645 rbd_img_parent_read(obj_request);
1646 else if (img_request)
1647 rbd_img_obj_request_read_callback(obj_request);
1648 else
1649 obj_request_done_set(obj_request);
1650 }
1651
1652 static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
1653 {
1654 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1655 obj_request->result, obj_request->length);
1656 /*
1657 * There is no such thing as a successful short write. Set
1658 * it to our originally-requested length.
1659 */
1660 obj_request->xferred = obj_request->length;
1661 obj_request_done_set(obj_request);
1662 }
1663
1664 /*
1665 * For a simple stat call there's nothing to do. We'll do more if
1666 * this is part of a write sequence for a layered image.
1667 */
1668 static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
1669 {
1670 dout("%s: obj %p\n", __func__, obj_request);
1671 obj_request_done_set(obj_request);
1672 }
1673
1674 static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
1675 {
1676 dout("%s: obj %p\n", __func__, obj_request);
1677
1678 if (obj_request_img_data_test(obj_request))
1679 rbd_osd_copyup_callback(obj_request);
1680 else
1681 obj_request_done_set(obj_request);
1682 }
1683
1684 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1685 struct ceph_msg *msg)
1686 {
1687 struct rbd_obj_request *obj_request = osd_req->r_priv;
1688 u16 opcode;
1689
1690 dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
1691 rbd_assert(osd_req == obj_request->osd_req);
1692 if (obj_request_img_data_test(obj_request)) {
1693 rbd_assert(obj_request->img_request);
1694 rbd_assert(obj_request->which != BAD_WHICH);
1695 } else {
1696 rbd_assert(obj_request->which == BAD_WHICH);
1697 }
1698
1699 if (osd_req->r_result < 0)
1700 obj_request->result = osd_req->r_result;
1701
1702 BUG_ON(osd_req->r_num_ops > 2);
1703
1704 /*
1705 * We support a 64-bit length, but ultimately it has to be
1706 * passed to blk_end_request(), which takes an unsigned int.
1707 */
1708 obj_request->xferred = osd_req->r_reply_op_len[0];
1709 rbd_assert(obj_request->xferred < (u64)UINT_MAX);
1710 opcode = osd_req->r_ops[0].op;
1711 switch (opcode) {
1712 case CEPH_OSD_OP_READ:
1713 rbd_osd_read_callback(obj_request);
1714 break;
1715 case CEPH_OSD_OP_WRITE:
1716 rbd_osd_write_callback(obj_request);
1717 break;
1718 case CEPH_OSD_OP_STAT:
1719 rbd_osd_stat_callback(obj_request);
1720 break;
1721 case CEPH_OSD_OP_CALL:
1722 rbd_osd_call_callback(obj_request);
1723 break;
1724 case CEPH_OSD_OP_NOTIFY_ACK:
1725 case CEPH_OSD_OP_WATCH:
1726 rbd_osd_trivial_callback(obj_request);
1727 break;
1728 default:
1729 rbd_warn(NULL, "%s: unsupported op %hu\n",
1730 obj_request->object_name, (unsigned short) opcode);
1731 break;
1732 }
1733
1734 if (obj_request_done_test(obj_request))
1735 rbd_obj_request_complete(obj_request);
1736 }
1737
1738 static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
1739 {
1740 struct rbd_img_request *img_request = obj_request->img_request;
1741 struct ceph_osd_request *osd_req = obj_request->osd_req;
1742 u64 snap_id;
1743
1744 rbd_assert(osd_req != NULL);
1745
1746 snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
1747 ceph_osdc_build_request(osd_req, obj_request->offset,
1748 NULL, snap_id, NULL);
1749 }
1750
1751 static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1752 {
1753 struct rbd_img_request *img_request = obj_request->img_request;
1754 struct ceph_osd_request *osd_req = obj_request->osd_req;
1755 struct ceph_snap_context *snapc;
1756 struct timespec mtime = CURRENT_TIME;
1757
1758 rbd_assert(osd_req != NULL);
1759
1760 snapc = img_request ? img_request->snapc : NULL;
1761 ceph_osdc_build_request(osd_req, obj_request->offset,
1762 snapc, CEPH_NOSNAP, &mtime);
1763 }
1764
1765 static struct ceph_osd_request *rbd_osd_req_create(
1766 struct rbd_device *rbd_dev,
1767 bool write_request,
1768 struct rbd_obj_request *obj_request)
1769 {
1770 struct ceph_snap_context *snapc = NULL;
1771 struct ceph_osd_client *osdc;
1772 struct ceph_osd_request *osd_req;
1773
1774 if (obj_request_img_data_test(obj_request)) {
1775 struct rbd_img_request *img_request = obj_request->img_request;
1776
1777 rbd_assert(write_request ==
1778 img_request_write_test(img_request));
1779 if (write_request)
1780 snapc = img_request->snapc;
1781 }
1782
1783 /* Allocate and initialize the request, for the single op */
1784
1785 osdc = &rbd_dev->rbd_client->client->osdc;
1786 osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
1787 if (!osd_req)
1788 return NULL; /* ENOMEM */
1789
1790 if (write_request)
1791 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1792 else
1793 osd_req->r_flags = CEPH_OSD_FLAG_READ;
1794
1795 osd_req->r_callback = rbd_osd_req_callback;
1796 osd_req->r_priv = obj_request;
1797
1798 osd_req->r_oid_len = strlen(obj_request->object_name);
1799 rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
1800 memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
1801
1802 osd_req->r_file_layout = rbd_dev->layout; /* struct */
1803
1804 return osd_req;
1805 }
1806
1807 /*
1808 * Create a copyup osd request based on the information in the
1809 * object request supplied. A copyup request has two osd ops:
1810 * a copyup method call and a "normal" write request.
1811 */
1812 static struct ceph_osd_request *
1813 rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1814 {
1815 struct rbd_img_request *img_request;
1816 struct ceph_snap_context *snapc;
1817 struct rbd_device *rbd_dev;
1818 struct ceph_osd_client *osdc;
1819 struct ceph_osd_request *osd_req;
1820
1821 rbd_assert(obj_request_img_data_test(obj_request));
1822 img_request = obj_request->img_request;
1823 rbd_assert(img_request);
1824 rbd_assert(img_request_write_test(img_request));
1825
1826 /* Allocate and initialize the request, for the two ops */
1827
1828 snapc = img_request->snapc;
1829 rbd_dev = img_request->rbd_dev;
1830 osdc = &rbd_dev->rbd_client->client->osdc;
1831 osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
1832 if (!osd_req)
1833 return NULL; /* ENOMEM */
1834
1835 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1836 osd_req->r_callback = rbd_osd_req_callback;
1837 osd_req->r_priv = obj_request;
1838
1839 osd_req->r_oid_len = strlen(obj_request->object_name);
1840 rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
1841 memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
1842
1843 osd_req->r_file_layout = rbd_dev->layout; /* struct */
1844
1845 return osd_req;
1846 }
1847
1848
1849 static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
1850 {
1851 ceph_osdc_put_request(osd_req);
1852 }
1853
1854 /* object_name is assumed to be a non-null pointer and NUL-terminated */
1855
1856 static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
1857 u64 offset, u64 length,
1858 enum obj_request_type type)
1859 {
1860 struct rbd_obj_request *obj_request;
1861 size_t size;
1862 char *name;
1863
1864 rbd_assert(obj_request_type_valid(type));
1865
1866 size = strlen(object_name) + 1;
1867 name = kmalloc(size, GFP_NOIO);
1868 if (!name)
1869 return NULL;
1870
1871 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
1872 if (!obj_request) {
1873 kfree(name);
1874 return NULL;
1875 }
1876
1877 obj_request->object_name = memcpy(name, object_name, size);
1878 obj_request->offset = offset;
1879 obj_request->length = length;
1880 obj_request->flags = 0;
1881 obj_request->which = BAD_WHICH;
1882 obj_request->type = type;
1883 INIT_LIST_HEAD(&obj_request->links);
1884 init_completion(&obj_request->completion);
1885 kref_init(&obj_request->kref);
1886
1887 dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
1888 offset, length, (int)type, obj_request);
1889
1890 return obj_request;
1891 }
1892
1893 static void rbd_obj_request_destroy(struct kref *kref)
1894 {
1895 struct rbd_obj_request *obj_request;
1896
1897 obj_request = container_of(kref, struct rbd_obj_request, kref);
1898
1899 dout("%s: obj %p\n", __func__, obj_request);
1900
1901 rbd_assert(obj_request->img_request == NULL);
1902 rbd_assert(obj_request->which == BAD_WHICH);
1903
1904 if (obj_request->osd_req)
1905 rbd_osd_req_destroy(obj_request->osd_req);
1906
1907 rbd_assert(obj_request_type_valid(obj_request->type));
1908 switch (obj_request->type) {
1909 case OBJ_REQUEST_NODATA:
1910 break; /* Nothing to do */
1911 case OBJ_REQUEST_BIO:
1912 if (obj_request->bio_list)
1913 bio_chain_put(obj_request->bio_list);
1914 break;
1915 case OBJ_REQUEST_PAGES:
1916 if (obj_request->pages)
1917 ceph_release_page_vector(obj_request->pages,
1918 obj_request->page_count);
1919 break;
1920 }
1921
1922 kfree(obj_request->object_name);
1923 obj_request->object_name = NULL;
1924 kmem_cache_free(rbd_obj_request_cache, obj_request);
1925 }
1926
1927 /* It's OK to call this for a device with no parent */
1928
1929 static void rbd_spec_put(struct rbd_spec *spec);
1930 static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1931 {
1932 rbd_dev_remove_parent(rbd_dev);
1933 rbd_spec_put(rbd_dev->parent_spec);
1934 rbd_dev->parent_spec = NULL;
1935 rbd_dev->parent_overlap = 0;
1936 }
1937
1938 /*
1939 * Parent image reference counting is used to determine when an
1940 * image's parent fields can be safely torn down--after there are no
1941 * more in-flight requests to the parent image. When the last
1942 * reference is dropped, cleaning them up is safe.
1943 */
1944 static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
1945 {
1946 int counter;
1947
1948 if (!rbd_dev->parent_spec)
1949 return;
1950
1951 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
1952 if (counter > 0)
1953 return;
1954
1955 /* Last reference; clean up parent data structures */
1956
1957 if (!counter)
1958 rbd_dev_unparent(rbd_dev);
1959 else
1960 rbd_warn(rbd_dev, "parent reference underflow\n");
1961 }
1962
1963 /*
1964 * If an image has a non-zero parent overlap, get a reference to its
1965 * parent.
1966 *
1967 * We must get the reference before checking for the overlap to
1968 * coordinate properly with zeroing the parent overlap in
1969 * rbd_dev_v2_parent_info() when an image gets flattened. We
1970 * drop it again if there is no overlap.
1971 *
1972 * Returns true if the rbd device has a parent with a non-zero
1973 * overlap and a reference for it was successfully taken, or
1974 * false otherwise.
1975 */
1976 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1977 {
1978 int counter;
1979
1980 if (!rbd_dev->parent_spec)
1981 return false;
1982
1983 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
1984 if (counter > 0 && rbd_dev->parent_overlap)
1985 return true;
1986
1987 /* Image was flattened, but parent is not yet torn down */
1988
1989 if (counter < 0)
1990 rbd_warn(rbd_dev, "parent reference overflow\n");
1991
1992 return false;
1993 }
1994
1995 /*
1996 * Caller is responsible for filling in the list of object requests
1997 * that comprises the image request, and the Linux request pointer
1998 * (if there is one).
1999 */
2000 static struct rbd_img_request *rbd_img_request_create(
2001 struct rbd_device *rbd_dev,
2002 u64 offset, u64 length,
2003 bool write_request)
2004 {
2005 struct rbd_img_request *img_request;
2006
2007 img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
2008 if (!img_request)
2009 return NULL;
2010
2011 if (write_request) {
2012 down_read(&rbd_dev->header_rwsem);
2013 ceph_get_snap_context(rbd_dev->header.snapc);
2014 up_read(&rbd_dev->header_rwsem);
2015 }
2016
2017 img_request->rq = NULL;
2018 img_request->rbd_dev = rbd_dev;
2019 img_request->offset = offset;
2020 img_request->length = length;
2021 img_request->flags = 0;
2022 if (write_request) {
2023 img_request_write_set(img_request);
2024 img_request->snapc = rbd_dev->header.snapc;
2025 } else {
2026 img_request->snap_id = rbd_dev->spec->snap_id;
2027 }
2028 if (rbd_dev_parent_get(rbd_dev))
2029 img_request_layered_set(img_request);
2030 spin_lock_init(&img_request->completion_lock);
2031 img_request->next_completion = 0;
2032 img_request->callback = NULL;
2033 img_request->result = 0;
2034 img_request->obj_request_count = 0;
2035 INIT_LIST_HEAD(&img_request->obj_requests);
2036 kref_init(&img_request->kref);
2037
2038 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
2039 write_request ? "write" : "read", offset, length,
2040 img_request);
2041
2042 return img_request;
2043 }
2044
2045 static void rbd_img_request_destroy(struct kref *kref)
2046 {
2047 struct rbd_img_request *img_request;
2048 struct rbd_obj_request *obj_request;
2049 struct rbd_obj_request *next_obj_request;
2050
2051 img_request = container_of(kref, struct rbd_img_request, kref);
2052
2053 dout("%s: img %p\n", __func__, img_request);
2054
2055 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2056 rbd_img_obj_request_del(img_request, obj_request);
2057 rbd_assert(img_request->obj_request_count == 0);
2058
2059 if (img_request_layered_test(img_request)) {
2060 img_request_layered_clear(img_request);
2061 rbd_dev_parent_put(img_request->rbd_dev);
2062 }
2063
2064 if (img_request_write_test(img_request))
2065 ceph_put_snap_context(img_request->snapc);
2066
2067 kmem_cache_free(rbd_img_request_cache, img_request);
2068 }
2069
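/*
 * Create an image request directed at the parent of the image the
 * given object request belongs to.  The parent request holds a
 * reference to the originating object request until it is destroyed.
 */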
2070 static struct rbd_img_request *rbd_parent_request_create(
2071 struct rbd_obj_request *obj_request,
2072 u64 img_offset, u64 length)
2073 {
2074 struct rbd_img_request *parent_request;
2075 struct rbd_device *rbd_dev;
2076
2077 rbd_assert(obj_request->img_request);
2078 rbd_dev = obj_request->img_request->rbd_dev;
2079
2080 parent_request = rbd_img_request_create(rbd_dev->parent,
2081 img_offset, length, false);
2082 if (!parent_request)
2083 return NULL;
2084
2085 img_request_child_set(parent_request);
2086 rbd_obj_request_get(obj_request);
2087 parent_request->obj_request = obj_request;
2088
2089 return parent_request;
2090 }
2091
2092 static void rbd_parent_request_destroy(struct kref *kref)
2093 {
2094 struct rbd_img_request *parent_request;
2095 struct rbd_obj_request *orig_request;
2096
2097 parent_request = container_of(kref, struct rbd_img_request, kref);
2098 orig_request = parent_request->obj_request;
2099
2100 parent_request->obj_request = NULL;
2101 rbd_obj_request_put(orig_request);
2102 img_request_child_clear(parent_request);
2103
2104 rbd_img_request_destroy(kref);
2105 }
2106
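/*
 * End I/O for a single object request that is part of an image
 * request.  On error, the entire object request's worth of bytes is
 * ended.  Returns true if more completions are expected for the
 * image request, false if this was the last one.
 */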
2107 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2108 {
2109 struct rbd_img_request *img_request;
2110 unsigned int xferred;
2111 int result;
2112 bool more;
2113
2114 rbd_assert(obj_request_img_data_test(obj_request));
2115 img_request = obj_request->img_request;
2116
2117 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2118 xferred = (unsigned int)obj_request->xferred;
2119 result = obj_request->result;
2120 if (result) {
2121 struct rbd_device *rbd_dev = img_request->rbd_dev;
2122
2123 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
2124 img_request_write_test(img_request) ? "write" : "read",
2125 obj_request->length, obj_request->img_offset,
2126 obj_request->offset);
2127 rbd_warn(rbd_dev, " result %d xferred %x\n",
2128 result, xferred);
2129 if (!img_request->result)
2130 img_request->result = result;
2131 /*
2132 * Need to end I/O on the entire obj_request worth of
2133 * bytes in case of error.
2134 */
2135 xferred = obj_request->length;
2136 }
2137
2138 /* Image object requests don't own their page array */
2139
2140 if (obj_request->type == OBJ_REQUEST_PAGES) {
2141 obj_request->pages = NULL;
2142 obj_request->page_count = 0;
2143 }
2144
2145 if (img_request_child_test(img_request)) {
2146 rbd_assert(img_request->obj_request != NULL);
2147 more = obj_request->which < img_request->obj_request_count - 1;
2148 } else {
2149 rbd_assert(img_request->rq != NULL);
2150 more = blk_end_request(img_request->rq, result, xferred);
2151 }
2152
2153 return more;
2154 }
2155
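/*
 * Per-object completion callback for image requests.  Object
 * requests may complete in any order, but their results must be
 * reported in order, so this only advances next_completion (and
 * ends block-layer I/O) across the contiguous run of completed
 * object requests starting at the current completion point.
 */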
2156 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2157 {
2158 struct rbd_img_request *img_request;
2159 u32 which = obj_request->which;
2160 bool more = true;
2161
2162 rbd_assert(obj_request_img_data_test(obj_request));
2163 img_request = obj_request->img_request;
2164
2165 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2166 rbd_assert(img_request != NULL);
2167 rbd_assert(img_request->obj_request_count > 0);
2168 rbd_assert(which != BAD_WHICH);
2169 rbd_assert(which < img_request->obj_request_count);
2170
2171 spin_lock_irq(&img_request->completion_lock);
2172 if (which != img_request->next_completion)
2173 goto out;
2174
2175 for_each_obj_request_from(img_request, obj_request) {
2176 rbd_assert(more);
2177 rbd_assert(which < img_request->obj_request_count);
2178
2179 if (!obj_request_done_test(obj_request))
2180 break;
2181 more = rbd_img_obj_end_request(obj_request);
2182 which++;
2183 }
2184
2185 rbd_assert(more ^ (which == img_request->obj_request_count));
2186 img_request->next_completion = which;
2187 out:
2188 spin_unlock_irq(&img_request->completion_lock);
2189 rbd_img_request_put(img_request);
2190
2191 if (!more)
2192 rbd_img_request_complete(img_request);
2193 }
2194
2195 /*
2196 * Split up an image request into one or more object requests, each
2197 * to a different object. The "type" parameter indicates whether
2198 * "data_desc" is the pointer to the head of a list of bio
2199 * structures, or the base of a page array. In either case this
2200 * function assumes data_desc describes memory sufficient to hold
2201 * all data described by the image request.
2202 */
2203 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2204 enum obj_request_type type,
2205 void *data_desc)
2206 {
2207 struct rbd_device *rbd_dev = img_request->rbd_dev;
2208 struct rbd_obj_request *obj_request = NULL;
2209 struct rbd_obj_request *next_obj_request;
2210 bool write_request = img_request_write_test(img_request);
2211         struct bio *bio_list = NULL;
2212         unsigned int bio_offset = 0;
2213         struct page **pages = NULL;
2214 u64 img_offset;
2215 u64 resid;
2216 u16 opcode;
2217
2218 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2219 (int)type, data_desc);
2220
2221 opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
2222 img_offset = img_request->offset;
2223 resid = img_request->length;
2224 rbd_assert(resid > 0);
2225
2226 if (type == OBJ_REQUEST_BIO) {
2227 bio_list = data_desc;
2228 rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
2229 } else {
2230 rbd_assert(type == OBJ_REQUEST_PAGES);
2231 pages = data_desc;
2232 }
2233
2234 while (resid) {
2235 struct ceph_osd_request *osd_req;
2236 const char *object_name;
2237 u64 offset;
2238 u64 length;
2239
2240 object_name = rbd_segment_name(rbd_dev, img_offset);
2241 if (!object_name)
2242 goto out_unwind;
2243 offset = rbd_segment_offset(rbd_dev, img_offset);
2244 length = rbd_segment_length(rbd_dev, img_offset, resid);
2245 obj_request = rbd_obj_request_create(object_name,
2246 offset, length, type);
2247 /* object request has its own copy of the object name */
2248 rbd_segment_name_free(object_name);
2249 if (!obj_request)
2250 goto out_unwind;
2251 /*
2252 * set obj_request->img_request before creating the
2253 * osd_request so that it gets the right snapc
2254 */
2255 rbd_img_obj_request_add(img_request, obj_request);
2256
2257 if (type == OBJ_REQUEST_BIO) {
2258 unsigned int clone_size;
2259
2260 rbd_assert(length <= (u64)UINT_MAX);
2261 clone_size = (unsigned int)length;
2262 obj_request->bio_list =
2263 bio_chain_clone_range(&bio_list,
2264 &bio_offset,
2265 clone_size,
2266 GFP_ATOMIC);
2267 if (!obj_request->bio_list)
2268 goto out_partial;
2269 } else {
2270 unsigned int page_count;
2271
2272 obj_request->pages = pages;
2273 page_count = (u32)calc_pages_for(offset, length);
2274 obj_request->page_count = page_count;
2275 if ((offset + length) & ~PAGE_MASK)
2276 page_count--; /* more on last page */
2277 pages += page_count;
2278 }
2279
2280 osd_req = rbd_osd_req_create(rbd_dev, write_request,
2281 obj_request);
2282 if (!osd_req)
2283 goto out_partial;
2284 obj_request->osd_req = osd_req;
2285 obj_request->callback = rbd_img_obj_callback;
2286 rbd_img_request_get(img_request);
2287
2288 osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
2289 0, 0);
2290 if (type == OBJ_REQUEST_BIO)
2291 osd_req_op_extent_osd_data_bio(osd_req, 0,
2292 obj_request->bio_list, length);
2293 else
2294 osd_req_op_extent_osd_data_pages(osd_req, 0,
2295 obj_request->pages, length,
2296 offset & ~PAGE_MASK, false, false);
2297
2298 if (write_request)
2299 rbd_osd_req_format_write(obj_request);
2300 else
2301 rbd_osd_req_format_read(obj_request);
2302
2303 obj_request->img_offset = img_offset;
2304
2305 img_offset += length;
2306 resid -= length;
2307 }
2308
2309 return 0;
2310
2311 out_partial:
2312 rbd_obj_request_put(obj_request);
2313 out_unwind:
2314 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2315 rbd_img_obj_request_del(img_request, obj_request);
2316
2317 return -ENOMEM;
2318 }
2319
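/*
 * Called once a copyup request has completed.  The copyup pages are
 * no longer needed, so release them, then record the transfer count
 * and mark the original object request done.
 */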
2320 static void
2321 rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
2322 {
2323 struct rbd_img_request *img_request;
2324 struct rbd_device *rbd_dev;
2325 struct page **pages;
2326 u32 page_count;
2327
2328 dout("%s: obj %p\n", __func__, obj_request);
2329
2330 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2331 rbd_assert(obj_request_img_data_test(obj_request));
2332 img_request = obj_request->img_request;
2333 rbd_assert(img_request);
2334
2335 rbd_dev = img_request->rbd_dev;
2336 rbd_assert(rbd_dev);
2337
2338 pages = obj_request->copyup_pages;
2339 rbd_assert(pages != NULL);
2340 obj_request->copyup_pages = NULL;
2341 page_count = obj_request->copyup_page_count;
2342 rbd_assert(page_count);
2343 obj_request->copyup_page_count = 0;
2344 ceph_release_page_vector(pages, page_count);
2345
2346 /*
2347 * We want the transfer count to reflect the size of the
2348 * original write request. There is no such thing as a
2349 * successful short write, so if the request was successful
2350 * we can just set it to the originally-requested length.
2351 */
2352 if (!obj_request->result)
2353 obj_request->xferred = obj_request->length;
2354
2355 obj_request_done_set(obj_request);
2356 }
2357
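/*
 * Called when the read of parent data for a copyup has completed.
 * Builds a new two-op osd request (the "copyup" class method call
 * followed by the original write) using the pages just read from
 * the parent, and submits it in place of the original osd request.
 * If the image was flattened in the meantime, the original write is
 * simply re-submitted.
 */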
2358 static void
2359 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2360 {
2361 struct rbd_obj_request *orig_request;
2362 struct ceph_osd_request *osd_req;
2363 struct ceph_osd_client *osdc;
2364 struct rbd_device *rbd_dev;
2365 struct page **pages;
2366 u32 page_count;
2367 int img_result;
2368 u64 parent_length;
2369 u64 offset;
2370 u64 length;
2371
2372 rbd_assert(img_request_child_test(img_request));
2373
2374 /* First get what we need from the image request */
2375
2376 pages = img_request->copyup_pages;
2377 rbd_assert(pages != NULL);
2378 img_request->copyup_pages = NULL;
2379 page_count = img_request->copyup_page_count;
2380 rbd_assert(page_count);
2381 img_request->copyup_page_count = 0;
2382
2383 orig_request = img_request->obj_request;
2384 rbd_assert(orig_request != NULL);
2385 rbd_assert(obj_request_type_valid(orig_request->type));
2386 img_result = img_request->result;
2387 parent_length = img_request->length;
2388 rbd_assert(parent_length == img_request->xferred);
2389 rbd_img_request_put(img_request);
2390
2391 rbd_assert(orig_request->img_request);
2392 rbd_dev = orig_request->img_request->rbd_dev;
2393 rbd_assert(rbd_dev);
2394
2395 /*
2396 * If the overlap has become 0 (most likely because the
2397 * image has been flattened) we need to free the pages
2398 * and re-submit the original write request.
2399 */
2400 if (!rbd_dev->parent_overlap) {
2401 struct ceph_osd_client *osdc;
2402
2403 ceph_release_page_vector(pages, page_count);
2404 osdc = &rbd_dev->rbd_client->client->osdc;
2405 img_result = rbd_obj_request_submit(osdc, orig_request);
2406 if (!img_result)
2407 return;
2408 }
2409
2410 if (img_result)
2411 goto out_err;
2412
2413 /*
2414  * The original osd request is of no use to us any more.
2415 * We need a new one that can hold the two ops in a copyup
2416 * request. Allocate the new copyup osd request for the
2417 * original request, and release the old one.
2418 */
2419 img_result = -ENOMEM;
2420 osd_req = rbd_osd_req_create_copyup(orig_request);
2421 if (!osd_req)
2422 goto out_err;
2423 rbd_osd_req_destroy(orig_request->osd_req);
2424 orig_request->osd_req = osd_req;
2425 orig_request->copyup_pages = pages;
2426 orig_request->copyup_page_count = page_count;
2427
2428 /* Initialize the copyup op */
2429
2430 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2431 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2432 false, false);
2433
2434 /* Then the original write request op */
2435
2436 offset = orig_request->offset;
2437 length = orig_request->length;
2438 osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2439 offset, length, 0, 0);
2440 if (orig_request->type == OBJ_REQUEST_BIO)
2441 osd_req_op_extent_osd_data_bio(osd_req, 1,
2442 orig_request->bio_list, length);
2443 else
2444 osd_req_op_extent_osd_data_pages(osd_req, 1,
2445 orig_request->pages, length,
2446 offset & ~PAGE_MASK, false, false);
2447
2448 rbd_osd_req_format_write(orig_request);
2449
2450 /* All set, send it off. */
2451
2452 osdc = &rbd_dev->rbd_client->client->osdc;
2453 img_result = rbd_obj_request_submit(osdc, orig_request);
2454 if (!img_result)
2455 return;
2456 out_err:
2457 /* Record the error code and complete the request */
2458
2459 orig_request->result = img_result;
2460 orig_request->xferred = 0;
2461 obj_request_done_set(orig_request);
2462 rbd_obj_request_complete(orig_request);
2463 }
2464
2465 /*
2466 * Read from the parent image the range of data that covers the
2467 * entire target of the given object request. This is used for
2468 * satisfying a layered image write request when the target of an
2469 * object request from the image request does not exist.
2470 *
2471 * A page array big enough to hold the returned data is allocated
2472 * and supplied to rbd_img_request_fill() as the "data descriptor."
2473 * When the read completes, this page array will be transferred to
2474 * the original object request for the copyup operation.
2475 *
2476 * If an error occurs, record it as the result of the original
2477 * object request and mark it done so it gets completed.
2478 */
2479 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2480 {
2481 struct rbd_img_request *img_request = NULL;
2482 struct rbd_img_request *parent_request = NULL;
2483 struct rbd_device *rbd_dev;
2484 u64 img_offset;
2485 u64 length;
2486 struct page **pages = NULL;
2487 u32 page_count;
2488 int result;
2489
2490 rbd_assert(obj_request_img_data_test(obj_request));
2491 rbd_assert(obj_request_type_valid(obj_request->type));
2492
2493 img_request = obj_request->img_request;
2494 rbd_assert(img_request != NULL);
2495 rbd_dev = img_request->rbd_dev;
2496 rbd_assert(rbd_dev->parent != NULL);
2497
2498 /*
2499 * Determine the byte range covered by the object in the
2500 * child image to which the original request was to be sent.
2501 */
2502 img_offset = obj_request->img_offset - obj_request->offset;
2503 length = (u64)1 << rbd_dev->header.obj_order;
2504
2505 /*
2506 * There is no defined parent data beyond the parent
2507 * overlap, so limit what we read at that boundary if
2508 * necessary.
2509 */
2510 if (img_offset + length > rbd_dev->parent_overlap) {
2511 rbd_assert(img_offset < rbd_dev->parent_overlap);
2512 length = rbd_dev->parent_overlap - img_offset;
2513 }
2514
2515 /*
2516 * Allocate a page array big enough to receive the data read
2517 * from the parent.
2518 */
2519 page_count = (u32)calc_pages_for(0, length);
2520 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2521 if (IS_ERR(pages)) {
2522 result = PTR_ERR(pages);
2523 pages = NULL;
2524 goto out_err;
2525 }
2526
2527 result = -ENOMEM;
2528 parent_request = rbd_parent_request_create(obj_request,
2529 img_offset, length);
2530 if (!parent_request)
2531 goto out_err;
2532
2533 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2534 if (result)
2535 goto out_err;
2536 parent_request->copyup_pages = pages;
2537 parent_request->copyup_page_count = page_count;
2538
2539 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2540 result = rbd_img_request_submit(parent_request);
2541 if (!result)
2542 return 0;
2543
2544 parent_request->copyup_pages = NULL;
2545 parent_request->copyup_page_count = 0;
2546 parent_request->obj_request = NULL;
2547 rbd_obj_request_put(obj_request);
2548 out_err:
2549 if (pages)
2550 ceph_release_page_vector(pages, page_count);
2551 if (parent_request)
2552 rbd_img_request_put(parent_request);
2553 obj_request->result = result;
2554 obj_request->xferred = 0;
2555 obj_request_done_set(obj_request);
2556
2557 return result;
2558 }
2559
2560 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2561 {
2562 struct rbd_obj_request *orig_request;
2563 struct rbd_device *rbd_dev;
2564 int result;
2565
2566 rbd_assert(!obj_request_img_data_test(obj_request));
2567
2568 /*
2569 * All we need from the object request is the original
2570 * request and the result of the STAT op. Grab those, then
2571 * we're done with the request.
2572 */
2573 orig_request = obj_request->obj_request;
2574 obj_request->obj_request = NULL;
2575 rbd_assert(orig_request);
2576 rbd_assert(orig_request->img_request);
2577
2578 result = obj_request->result;
2579 obj_request->result = 0;
2580
2581 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2582 obj_request, orig_request, result,
2583 obj_request->xferred, obj_request->length);
2584 rbd_obj_request_put(obj_request);
2585
2586 /*
2587 * If the overlap has become 0 (most likely because the
2588  * image has been flattened) we need to re-submit the original
2589  * write request.
2590 */
2591 rbd_dev = orig_request->img_request->rbd_dev;
2592 if (!rbd_dev->parent_overlap) {
2593 struct ceph_osd_client *osdc;
2594
2595 rbd_obj_request_put(orig_request);
2596 osdc = &rbd_dev->rbd_client->client->osdc;
2597 result = rbd_obj_request_submit(osdc, orig_request);
2598 if (!result)
2599 return;
2600 }
2601
2602 /*
2603 * Our only purpose here is to determine whether the object
2604 * exists, and we don't want to treat the non-existence as
2605 * an error. If something else comes back, transfer the
2606 * error to the original request and complete it now.
2607 */
2608 if (!result) {
2609 obj_request_existence_set(orig_request, true);
2610 } else if (result == -ENOENT) {
2611 obj_request_existence_set(orig_request, false);
2612 } else if (result) {
2613 orig_request->result = result;
2614 goto out;
2615 }
2616
2617 /*
2618 * Resubmit the original request now that we have recorded
2619 * whether the target object exists.
2620 */
2621 orig_request->result = rbd_img_obj_request_submit(orig_request);
2622 out:
2623 if (orig_request->result)
2624 rbd_obj_request_complete(orig_request);
2625 rbd_obj_request_put(orig_request);
2626 }
2627
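/*
 * Issue a STAT request for the target object of a layered write to
 * learn whether it already exists.  The result is handled by
 * rbd_img_obj_exists_callback(), which records the answer and
 * re-submits the original object request.
 */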
2628 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2629 {
2630 struct rbd_obj_request *stat_request;
2631 struct rbd_device *rbd_dev;
2632 struct ceph_osd_client *osdc;
2633 struct page **pages = NULL;
2634 u32 page_count;
2635 size_t size;
2636 int ret;
2637
2638 /*
2639 * The response data for a STAT call consists of:
2640 * le64 length;
2641 * struct {
2642 * le32 tv_sec;
2643 * le32 tv_nsec;
2644 * } mtime;
2645 */
2646 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2647 page_count = (u32)calc_pages_for(0, size);
2648 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2649 if (IS_ERR(pages))
2650 return PTR_ERR(pages);
2651
2652 ret = -ENOMEM;
2653 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2654 OBJ_REQUEST_PAGES);
2655 if (!stat_request)
2656 goto out;
2657
2658 rbd_obj_request_get(obj_request);
2659 stat_request->obj_request = obj_request;
2660 stat_request->pages = pages;
2661 stat_request->page_count = page_count;
2662
2663 rbd_assert(obj_request->img_request);
2664 rbd_dev = obj_request->img_request->rbd_dev;
2665 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2666 stat_request);
2667 if (!stat_request->osd_req)
2668 goto out;
2669 stat_request->callback = rbd_img_obj_exists_callback;
2670
2671 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2672 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2673 false, false);
2674 rbd_osd_req_format_read(stat_request);
2675
2676 osdc = &rbd_dev->rbd_client->client->osdc;
2677 ret = rbd_obj_request_submit(osdc, stat_request);
2678 out:
2679 if (ret)
2680 rbd_obj_request_put(obj_request);
2681
2682 return ret;
2683 }
2684
2685 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2686 {
2687 struct rbd_img_request *img_request;
2688 struct rbd_device *rbd_dev;
2689 bool known;
2690
2691 rbd_assert(obj_request_img_data_test(obj_request));
2692
2693 img_request = obj_request->img_request;
2694 rbd_assert(img_request);
2695 rbd_dev = img_request->rbd_dev;
2696
2697 /*
2698 * Only writes to layered images need special handling.
2699 * Reads and non-layered writes are simple object requests.
2700 * Layered writes that start beyond the end of the overlap
2701 * with the parent have no parent data, so they too are
2702 * simple object requests. Finally, if the target object is
2703 * known to already exist, its parent data has already been
2704 * copied, so a write to the object can also be handled as a
2705 * simple object request.
2706 */
2707 if (!img_request_write_test(img_request) ||
2708 !img_request_layered_test(img_request) ||
2709 !obj_request_overlaps_parent(obj_request) ||
2710 ((known = obj_request_known_test(obj_request)) &&
2711 obj_request_exists_test(obj_request))) {
2712
2713 struct rbd_device *rbd_dev;
2714 struct ceph_osd_client *osdc;
2715
2716 rbd_dev = obj_request->img_request->rbd_dev;
2717 osdc = &rbd_dev->rbd_client->client->osdc;
2718
2719 return rbd_obj_request_submit(osdc, obj_request);
2720 }
2721
2722 /*
2723 * It's a layered write. The target object might exist but
2724 * we may not know that yet. If we know it doesn't exist,
2725 * start by reading the data for the full target object from
2726 * the parent so we can use it for a copyup to the target.
2727 */
2728 if (known)
2729 return rbd_img_obj_parent_read_full(obj_request);
2730
2731 /* We don't know whether the target exists. Go find out. */
2732
2733 return rbd_img_obj_exists_submit(obj_request);
2734 }
2735
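/*
 * Submit each of the image request's object requests in turn.
 * Returns the first submission error encountered; object requests
 * already submitted will still complete asynchronously.
 */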
2736 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2737 {
2738 struct rbd_obj_request *obj_request;
2739 struct rbd_obj_request *next_obj_request;
2740
2741 dout("%s: img %p\n", __func__, img_request);
2742 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2743 int ret;
2744
2745 ret = rbd_img_obj_request_submit(obj_request);
2746 if (ret)
2747 return ret;
2748 }
2749
2750 return 0;
2751 }
2752
2753 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2754 {
2755 struct rbd_obj_request *obj_request;
2756 struct rbd_device *rbd_dev;
2757 u64 obj_end;
2758 u64 img_xferred;
2759 int img_result;
2760
2761 rbd_assert(img_request_child_test(img_request));
2762
2763 /* First get what we need from the image request and release it */
2764
2765 obj_request = img_request->obj_request;
2766 img_xferred = img_request->xferred;
2767 img_result = img_request->result;
2768 rbd_img_request_put(img_request);
2769
2770 /*
2771 * If the overlap has become 0 (most likely because the
2772 * image has been flattened) we need to re-submit the
2773 * original request.
2774 */
2775 rbd_assert(obj_request);
2776 rbd_assert(obj_request->img_request);
2777 rbd_dev = obj_request->img_request->rbd_dev;
2778 if (!rbd_dev->parent_overlap) {
2779 struct ceph_osd_client *osdc;
2780
2781 osdc = &rbd_dev->rbd_client->client->osdc;
2782 img_result = rbd_obj_request_submit(osdc, obj_request);
2783 if (!img_result)
2784 return;
2785 }
2786
2787 obj_request->result = img_result;
2788 if (obj_request->result)
2789 goto out;
2790
2791 /*
2792 * We need to zero anything beyond the parent overlap
2793 * boundary. Since rbd_img_obj_request_read_callback()
2794 * will zero anything beyond the end of a short read, an
2795 * easy way to do this is to pretend the data from the
2796 * parent came up short--ending at the overlap boundary.
2797 */
2798 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2799 obj_end = obj_request->img_offset + obj_request->length;
2800 if (obj_end > rbd_dev->parent_overlap) {
2801 u64 xferred = 0;
2802
2803 if (obj_request->img_offset < rbd_dev->parent_overlap)
2804 xferred = rbd_dev->parent_overlap -
2805 obj_request->img_offset;
2806
2807 obj_request->xferred = min(img_xferred, xferred);
2808 } else {
2809 obj_request->xferred = img_xferred;
2810 }
2811 out:
2812 rbd_img_obj_request_read_callback(obj_request);
2813 rbd_obj_request_complete(obj_request);
2814 }
2815
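/*
 * A read of an object that doesn't exist in a layered image (the
 * osd returned -ENOENT) is satisfied from the parent image instead.
 * Build and submit a parent image request covering the same range;
 * rbd_img_parent_read_callback() finishes the original request when
 * the parent read completes.
 */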
2816 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2817 {
2818 struct rbd_img_request *img_request;
2819 int result;
2820
2821 rbd_assert(obj_request_img_data_test(obj_request));
2822 rbd_assert(obj_request->img_request != NULL);
2823 rbd_assert(obj_request->result == (s32) -ENOENT);
2824 rbd_assert(obj_request_type_valid(obj_request->type));
2825
2826 /* rbd_read_finish(obj_request, obj_request->length); */
2827 img_request = rbd_parent_request_create(obj_request,
2828 obj_request->img_offset,
2829 obj_request->length);
2830 result = -ENOMEM;
2831 if (!img_request)
2832 goto out_err;
2833
2834 if (obj_request->type == OBJ_REQUEST_BIO)
2835 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2836 obj_request->bio_list);
2837 else
2838 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2839 obj_request->pages);
2840 if (result)
2841 goto out_err;
2842
2843 img_request->callback = rbd_img_parent_read_callback;
2844 result = rbd_img_request_submit(img_request);
2845 if (result)
2846 goto out_err;
2847
2848 return;
2849 out_err:
2850 if (img_request)
2851 rbd_img_request_put(img_request);
2852 obj_request->result = result;
2853 obj_request->xferred = 0;
2854 obj_request_done_set(obj_request);
2855 }
2856
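/* Synchronously acknowledge a notification received on the header object */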
2857 static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
2858 {
2859 struct rbd_obj_request *obj_request;
2860 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2861 int ret;
2862
2863 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2864 OBJ_REQUEST_NODATA);
2865 if (!obj_request)
2866 return -ENOMEM;
2867
2868 ret = -ENOMEM;
2869 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2870 if (!obj_request->osd_req)
2871 goto out;
2872
2873 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2874 notify_id, 0, 0);
2875 rbd_osd_req_format_read(obj_request);
2876
2877 ret = rbd_obj_request_submit(osdc, obj_request);
2878 if (ret)
2879 goto out;
2880 ret = rbd_obj_request_wait(obj_request);
2881 out:
2882 rbd_obj_request_put(obj_request);
2883
2884 return ret;
2885 }
2886
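/*
 * Callback for watch events on the header object.  A notification
 * means the image header may have changed, so refresh our cached
 * copy and then acknowledge the notification.
 */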
2887 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2888 {
2889 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2890 int ret;
2891
2892 if (!rbd_dev)
2893 return;
2894
2895 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2896 rbd_dev->header_name, (unsigned long long)notify_id,
2897 (unsigned int)opcode);
2898 ret = rbd_dev_refresh(rbd_dev);
2899 if (ret)
2900 rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);
2901
2902 rbd_obj_notify_ack_sync(rbd_dev, notify_id);
2903 }
2904
2905 /*
2906 * Request sync osd watch/unwatch. The value of "start" determines
2907 * whether a watch request is being initiated or torn down.
2908 */
2909 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
2910 {
2911 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2912 struct rbd_obj_request *obj_request;
2913 int ret;
2914
2915 rbd_assert(start ^ !!rbd_dev->watch_event);
2916 rbd_assert(start ^ !!rbd_dev->watch_request);
2917
2918 if (start) {
2919 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2920 &rbd_dev->watch_event);
2921 if (ret < 0)
2922 return ret;
2923 rbd_assert(rbd_dev->watch_event != NULL);
2924 }
2925
2926 ret = -ENOMEM;
2927 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2928 OBJ_REQUEST_NODATA);
2929 if (!obj_request)
2930 goto out_cancel;
2931
2932 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2933 if (!obj_request->osd_req)
2934 goto out_cancel;
2935
2936 if (start)
2937 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2938 else
2939 ceph_osdc_unregister_linger_request(osdc,
2940 rbd_dev->watch_request->osd_req);
2941
2942 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2943 rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
2944 rbd_osd_req_format_write(obj_request);
2945
2946 ret = rbd_obj_request_submit(osdc, obj_request);
2947 if (ret)
2948 goto out_cancel;
2949 ret = rbd_obj_request_wait(obj_request);
2950 if (ret)
2951 goto out_cancel;
2952 ret = obj_request->result;
2953 if (ret)
2954 goto out_cancel;
2955
2956 /*
2957 * A watch request is set to linger, so the underlying osd
2958 * request won't go away until we unregister it. We retain
2959 * a pointer to the object request during that time (in
2960 * rbd_dev->watch_request), so we'll keep a reference to
2961 * it. We'll drop that reference (below) after we've
2962 * unregistered it.
2963 */
2964 if (start) {
2965 rbd_dev->watch_request = obj_request;
2966
2967 return 0;
2968 }
2969
2970 /* We have successfully torn down the watch request */
2971
2972 rbd_obj_request_put(rbd_dev->watch_request);
2973 rbd_dev->watch_request = NULL;
2974 out_cancel:
2975 /* Cancel the event if we're tearing down, or on error */
2976 ceph_osdc_cancel_event(rbd_dev->watch_event);
2977 rbd_dev->watch_event = NULL;
2978 if (obj_request)
2979 rbd_obj_request_put(obj_request);
2980
2981 return ret;
2982 }
2983
2984 /*
2985 * Synchronous osd object method call. Returns the number of bytes
2986 * returned in the outbound buffer, or a negative error code.
2987 */
2988 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2989 const char *object_name,
2990 const char *class_name,
2991 const char *method_name,
2992 const void *outbound,
2993 size_t outbound_size,
2994 void *inbound,
2995 size_t inbound_size)
2996 {
2997 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2998 struct rbd_obj_request *obj_request;
2999 struct page **pages;
3000 u32 page_count;
3001 int ret;
3002
3003 /*
3004 * Method calls are ultimately read operations. The result
3005  * should be placed into the inbound buffer provided.  They
3006 * also supply outbound data--parameters for the object
3007 * method. Currently if this is present it will be a
3008 * snapshot id.
3009 */
3010 page_count = (u32)calc_pages_for(0, inbound_size);
3011 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3012 if (IS_ERR(pages))
3013 return PTR_ERR(pages);
3014
3015 ret = -ENOMEM;
3016 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
3017 OBJ_REQUEST_PAGES);
3018 if (!obj_request)
3019 goto out;
3020
3021 obj_request->pages = pages;
3022 obj_request->page_count = page_count;
3023
3024 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
3025 if (!obj_request->osd_req)
3026 goto out;
3027
3028 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
3029 class_name, method_name);
3030 if (outbound_size) {
3031 struct ceph_pagelist *pagelist;
3032
3033 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3034 if (!pagelist)
3035 goto out;
3036
3037 ceph_pagelist_init(pagelist);
3038 ceph_pagelist_append(pagelist, outbound, outbound_size);
3039 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3040 pagelist);
3041 }
3042 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3043 obj_request->pages, inbound_size,
3044 0, false, false);
3045 rbd_osd_req_format_read(obj_request);
3046
3047 ret = rbd_obj_request_submit(osdc, obj_request);
3048 if (ret)
3049 goto out;
3050 ret = rbd_obj_request_wait(obj_request);
3051 if (ret)
3052 goto out;
3053
3054 ret = obj_request->result;
3055 if (ret < 0)
3056 goto out;
3057
3058 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3059 ret = (int)obj_request->xferred;
3060 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3061 out:
3062 if (obj_request)
3063 rbd_obj_request_put(obj_request);
3064 else
3065 ceph_release_page_vector(pages, page_count);
3066
3067 return ret;
3068 }
3069
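/*
 * The block layer request function.  Dequeues requests from the
 * queue, drops the queue lock while an image request is built and
 * submitted for each one, and reacquires it before ending any
 * request that failed up front.
 */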
3070 static void rbd_request_fn(struct request_queue *q)
3071 __releases(q->queue_lock) __acquires(q->queue_lock)
3072 {
3073 struct rbd_device *rbd_dev = q->queuedata;
3074 bool read_only = rbd_dev->mapping.read_only;
3075 struct request *rq;
3076 int result;
3077
3078 while ((rq = blk_fetch_request(q))) {
3079 bool write_request = rq_data_dir(rq) == WRITE;
3080 struct rbd_img_request *img_request;
3081 u64 offset;
3082 u64 length;
3083
3084 /* Ignore any non-FS requests that filter through. */
3085
3086 if (rq->cmd_type != REQ_TYPE_FS) {
3087 dout("%s: non-fs request type %d\n", __func__,
3088 (int) rq->cmd_type);
3089 __blk_end_request_all(rq, 0);
3090 continue;
3091 }
3092
3093 /* Ignore/skip any zero-length requests */
3094
3095 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
3096 length = (u64) blk_rq_bytes(rq);
3097
3098 if (!length) {
3099 dout("%s: zero-length request\n", __func__);
3100 __blk_end_request_all(rq, 0);
3101 continue;
3102 }
3103
3104 spin_unlock_irq(q->queue_lock);
3105
3106 /* Disallow writes to a read-only device */
3107
3108 if (write_request) {
3109 result = -EROFS;
3110 if (read_only)
3111 goto end_request;
3112 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3113 }
3114
3115 /*
3116 * Quit early if the mapped snapshot no longer
3117 * exists. It's still possible the snapshot will
3118 * have disappeared by the time our request arrives
3119 * at the osd, but there's no sense in sending it if
3120 * we already know.
3121 */
3122 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3123 dout("request for non-existent snapshot");
3124 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3125 result = -ENXIO;
3126 goto end_request;
3127 }
3128
3129 result = -EINVAL;
3130 if (offset && length > U64_MAX - offset + 1) {
3131 rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
3132 offset, length);
3133 goto end_request; /* Shouldn't happen */
3134 }
3135
3136 result = -EIO;
3137 if (offset + length > rbd_dev->mapping.size) {
3138 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
3139 offset, length, rbd_dev->mapping.size);
3140 goto end_request;
3141 }
3142
3143 result = -ENOMEM;
3144 img_request = rbd_img_request_create(rbd_dev, offset, length,
3145 write_request);
3146 if (!img_request)
3147 goto end_request;
3148
3149 img_request->rq = rq;
3150
3151 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3152 rq->bio);
3153 if (!result)
3154 result = rbd_img_request_submit(img_request);
3155 if (result)
3156 rbd_img_request_put(img_request);
3157 end_request:
3158 spin_lock_irq(q->queue_lock);
3159 if (result < 0) {
3160 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
3161 write_request ? "write" : "read",
3162 length, offset, result);
3163
3164 __blk_end_request_all(rq, result);
3165 }
3166 }
3167 }
3168
3169 /*
3170  * A queue merge_bvec callback.  Makes sure that we don't create a bio that
3171  * spans multiple osd objects.  One exception would be single-page bios,
3172  * which we handle later in bio_chain_clone_range().
3173 */
3174 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3175 struct bio_vec *bvec)
3176 {
3177 struct rbd_device *rbd_dev = q->queuedata;
3178 sector_t sector_offset;
3179 sector_t sectors_per_obj;
3180 sector_t obj_sector_offset;
3181 int ret;
3182
3183 /*
3184          * Convert the partition-relative bio start sector to an offset
3185          * relative to the enclosing device, then find how far into its
3186          * rbd object that sector falls.
3187 */
3188 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3189 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3190 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3191
3192 /*
3193 * Compute the number of bytes from that offset to the end
3194 * of the object. Account for what's already used by the bio.
3195 */
3196 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3197 if (ret > bmd->bi_size)
3198 ret -= bmd->bi_size;
3199 else
3200 ret = 0;
3201
3202 /*
3203 * Don't send back more than was asked for. And if the bio
3204 * was empty, let the whole thing through because: "Note
3205 * that a block device *must* allow a single page to be
3206 * added to an empty bio."
3207 */
3208 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3209 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3210 ret = (int) bvec->bv_len;
3211
3212 return ret;
3213 }
3214
3215 static void rbd_free_disk(struct rbd_device *rbd_dev)
3216 {
3217 struct gendisk *disk = rbd_dev->disk;
3218
3219 if (!disk)
3220 return;
3221
3222 rbd_dev->disk = NULL;
3223 if (disk->flags & GENHD_FL_UP) {
3224 del_gendisk(disk);
3225 if (disk->queue)
3226 blk_cleanup_queue(disk->queue);
3227 }
3228 put_disk(disk);
3229 }
3230
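/*
 * Synchronously read a byte range from the named object into the
 * buffer provided.  Returns the number of bytes read, or a negative
 * error code.  Used below to read the format 1 image header.
 */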
3231 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3232 const char *object_name,
3233 u64 offset, u64 length, void *buf)
3234
3235 {
3236 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3237 struct rbd_obj_request *obj_request;
3238 struct page **pages = NULL;
3239 u32 page_count;
3240 size_t size;
3241 int ret;
3242
3243 page_count = (u32) calc_pages_for(offset, length);
3244 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3245 if (IS_ERR(pages))
3246 return PTR_ERR(pages);
3247
3248 ret = -ENOMEM;
3249 obj_request = rbd_obj_request_create(object_name, offset, length,
3250 OBJ_REQUEST_PAGES);
3251 if (!obj_request)
3252 goto out;
3253
3254 obj_request->pages = pages;
3255 obj_request->page_count = page_count;
3256
3257 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
3258 if (!obj_request->osd_req)
3259 goto out;
3260
3261 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3262 offset, length, 0, 0);
3263 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3264 obj_request->pages,
3265 obj_request->length,
3266 obj_request->offset & ~PAGE_MASK,
3267 false, false);
3268 rbd_osd_req_format_read(obj_request);
3269
3270 ret = rbd_obj_request_submit(osdc, obj_request);
3271 if (ret)
3272 goto out;
3273 ret = rbd_obj_request_wait(obj_request);
3274 if (ret)
3275 goto out;
3276
3277 ret = obj_request->result;
3278 if (ret < 0)
3279 goto out;
3280
3281 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3282 size = (size_t) obj_request->xferred;
3283 ceph_copy_from_page_vector(pages, buf, 0, size);
3284 rbd_assert(size <= (size_t)INT_MAX);
3285 ret = (int)size;
3286 out:
3287 if (obj_request)
3288 rbd_obj_request_put(obj_request);
3289 else
3290 ceph_release_page_vector(pages, page_count);
3291
3292 return ret;
3293 }
3294
3295 /*
3296 * Read the complete header for the given rbd device. On successful
3297 * return, the rbd_dev->header field will contain up-to-date
3298 * information about the image.
3299 */
3300 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3301 {
3302 struct rbd_image_header_ondisk *ondisk = NULL;
3303 u32 snap_count = 0;
3304 u64 names_size = 0;
3305 u32 want_count;
3306 int ret;
3307
3308 /*
3309 * The complete header will include an array of its 64-bit
3310 * snapshot ids, followed by the names of those snapshots as
3311 * a contiguous block of NUL-terminated strings. Note that
3312 * the number of snapshots could change by the time we read
3313 * it in, in which case we re-read it.
3314 */
3315 do {
3316 size_t size;
3317
3318 kfree(ondisk);
3319
3320 size = sizeof (*ondisk);
3321 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3322 size += names_size;
3323 ondisk = kmalloc(size, GFP_KERNEL);
3324 if (!ondisk)
3325 return -ENOMEM;
3326
3327 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3328 0, size, ondisk);
3329 if (ret < 0)
3330 goto out;
3331 if ((size_t)ret < size) {
3332 ret = -ENXIO;
3333 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3334 size, ret);
3335 goto out;
3336 }
3337 if (!rbd_dev_ondisk_valid(ondisk)) {
3338 ret = -ENXIO;
3339 rbd_warn(rbd_dev, "invalid header");
3340 goto out;
3341 }
3342
3343 names_size = le64_to_cpu(ondisk->snap_names_len);
3344 want_count = snap_count;
3345 snap_count = le32_to_cpu(ondisk->snap_count);
3346 } while (snap_count != want_count);
3347
3348 ret = rbd_header_from_disk(rbd_dev, ondisk);
3349 out:
3350 kfree(ondisk);
3351
3352 return ret;
3353 }
3354
3355 /*
3356 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3357 * has disappeared from the (just updated) snapshot context.
3358 */
3359 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3360 {
3361 u64 snap_id;
3362
3363 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3364 return;
3365
3366 snap_id = rbd_dev->spec->snap_id;
3367 if (snap_id == CEPH_NOSNAP)
3368 return;
3369
3370 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3371 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3372 }
3373
3374 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3375 {
3376 sector_t size;
3377 bool removing;
3378
3379 /*
3380 * Don't hold the lock while doing disk operations,
3381 * or lock ordering will conflict with the bdev mutex via:
3382 * rbd_add() -> blkdev_get() -> rbd_open()
3383 */
3384 spin_lock_irq(&rbd_dev->lock);
3385 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
3386 spin_unlock_irq(&rbd_dev->lock);
3387 /*
3388 * If the device is being removed, rbd_dev->disk has
3389 * been destroyed, so don't try to update its size
3390 */
3391 if (!removing) {
3392 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3393 dout("setting size to %llu sectors", (unsigned long long)size);
3394 set_capacity(rbd_dev->disk, size);
3395 revalidate_disk(rbd_dev->disk);
3396 }
3397 }
3398
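/*
 * Re-read the image header (format 1 or 2), revalidate the EXISTS
 * flag for a mapped snapshot, and update the block device capacity
 * if the mapping size changed.
 */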
3399 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3400 {
3401 u64 mapping_size;
3402 int ret;
3403
3404 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3405 mapping_size = rbd_dev->mapping.size;
3406 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3407 if (rbd_dev->image_format == 1)
3408 ret = rbd_dev_v1_header_info(rbd_dev);
3409 else
3410 ret = rbd_dev_v2_header_info(rbd_dev);
3411
3412 /* If it's a mapped snapshot, validate its EXISTS flag */
3413
3414 rbd_exists_validate(rbd_dev);
3415 mutex_unlock(&ctl_mutex);
3416 if (mapping_size != rbd_dev->mapping.size) {
3417 rbd_dev_update_size(rbd_dev);
3418 }
3419
3420 return ret;
3421 }
3422
3423 static int rbd_init_disk(struct rbd_device *rbd_dev)
3424 {
3425 struct gendisk *disk;
3426 struct request_queue *q;
3427 u64 segment_size;
3428
3429 /* create gendisk info */
3430 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3431 if (!disk)
3432 return -ENOMEM;
3433
3434 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3435 rbd_dev->dev_id);
3436 disk->major = rbd_dev->major;
3437 disk->first_minor = 0;
3438 disk->fops = &rbd_bd_ops;
3439 disk->private_data = rbd_dev;
3440
3441 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3442 if (!q)
3443 goto out_disk;
3444
3445 /* We use the default size, but let's be explicit about it. */
3446 blk_queue_physical_block_size(q, SECTOR_SIZE);
3447
3448 /* set io sizes to object size */
3449 segment_size = rbd_obj_bytes(&rbd_dev->header);
3450 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3451 blk_queue_max_segment_size(q, segment_size);
3452 blk_queue_io_min(q, segment_size);
3453 blk_queue_io_opt(q, segment_size);
3454
3455 blk_queue_merge_bvec(q, rbd_merge_bvec);
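/*
 * If the messenger is computing CRCs over message data (the "nocrc"
 * option is not set), page contents must not change while a write is
 * in flight, so require stable pages from the page cache.
 */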
3456 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
3457 q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
3458
3459 disk->queue = q;
3460
3461 q->queuedata = rbd_dev;
3462
3463 rbd_dev->disk = disk;
3464
3465 return 0;
3466 out_disk:
3467 put_disk(disk);
3468
3469 return -ENOMEM;
3470 }
3471
3472 /*
3473 sysfs
3474 */
3475
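/*
 * Each mapped device is exposed under sysfs, e.g. (assuming the
 * device was assigned id 0):
 *
 *   /sys/bus/rbd/devices/0/size
 *   /sys/bus/rbd/devices/0/current_snap
 *   echo 1 > /sys/bus/rbd/devices/0/refresh
 *
 * The attributes below back those files; see
 * Documentation/ABI/testing/sysfs-bus-rbd for the full list.
 */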
3476 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3477 {
3478 return container_of(dev, struct rbd_device, dev);
3479 }
3480
3481 static ssize_t rbd_size_show(struct device *dev,
3482 struct device_attribute *attr, char *buf)
3483 {
3484 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3485
3486 return sprintf(buf, "%llu\n",
3487 (unsigned long long)rbd_dev->mapping.size);
3488 }
3489
3490 /*
3491 * Note this shows the features for whatever's mapped, which is not
3492 * necessarily the base image.
3493 */
3494 static ssize_t rbd_features_show(struct device *dev,
3495 struct device_attribute *attr, char *buf)
3496 {
3497 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3498
3499 return sprintf(buf, "0x%016llx\n",
3500 (unsigned long long)rbd_dev->mapping.features);
3501 }
3502
3503 static ssize_t rbd_major_show(struct device *dev,
3504 struct device_attribute *attr, char *buf)
3505 {
3506 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3507
3508 if (rbd_dev->major)
3509 return sprintf(buf, "%d\n", rbd_dev->major);
3510
3511 return sprintf(buf, "(none)\n");
3512
3513 }
3514
3515 static ssize_t rbd_client_id_show(struct device *dev,
3516 struct device_attribute *attr, char *buf)
3517 {
3518 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3519
3520 return sprintf(buf, "client%lld\n",
3521 ceph_client_id(rbd_dev->rbd_client->client));
3522 }
3523
3524 static ssize_t rbd_pool_show(struct device *dev,
3525 struct device_attribute *attr, char *buf)
3526 {
3527 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3528
3529 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3530 }
3531
3532 static ssize_t rbd_pool_id_show(struct device *dev,
3533 struct device_attribute *attr, char *buf)
3534 {
3535 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3536
3537 return sprintf(buf, "%llu\n",
3538 (unsigned long long) rbd_dev->spec->pool_id);
3539 }
3540
3541 static ssize_t rbd_name_show(struct device *dev,
3542 struct device_attribute *attr, char *buf)
3543 {
3544 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3545
3546 if (rbd_dev->spec->image_name)
3547 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3548
3549 return sprintf(buf, "(unknown)\n");
3550 }
3551
3552 static ssize_t rbd_image_id_show(struct device *dev,
3553 struct device_attribute *attr, char *buf)
3554 {
3555 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3556
3557 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3558 }
3559
3560 /*
3561 * Shows the name of the currently-mapped snapshot (or
3562 * RBD_SNAP_HEAD_NAME for the base image).
3563 */
3564 static ssize_t rbd_snap_show(struct device *dev,
3565 struct device_attribute *attr,
3566 char *buf)
3567 {
3568 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3569
3570 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3571 }
3572
3573 /*
3574 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3575 * for the parent image. If there is no parent, simply shows
3576 * "(no parent image)".
3577 */
3578 static ssize_t rbd_parent_show(struct device *dev,
3579 struct device_attribute *attr,
3580 char *buf)
3581 {
3582 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3583 struct rbd_spec *spec = rbd_dev->parent_spec;
3584 int count;
3585 char *bufp = buf;
3586
3587 if (!spec)
3588 return sprintf(buf, "(no parent image)\n");
3589
3590 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3591 (unsigned long long) spec->pool_id, spec->pool_name);
3592 if (count < 0)
3593 return count;
3594 bufp += count;
3595
3596 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3597 spec->image_name ? spec->image_name : "(unknown)");
3598 if (count < 0)
3599 return count;
3600 bufp += count;
3601
3602 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3603 (unsigned long long) spec->snap_id, spec->snap_name);
3604 if (count < 0)
3605 return count;
3606 bufp += count;
3607
3608 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3609 if (count < 0)
3610 return count;
3611 bufp += count;
3612
3613 return (ssize_t) (bufp - buf);
3614 }
3615
3616 static ssize_t rbd_image_refresh(struct device *dev,
3617 struct device_attribute *attr,
3618 const char *buf,
3619 size_t size)
3620 {
3621 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3622 int ret;
3623
3624 ret = rbd_dev_refresh(rbd_dev);
3625 if (ret)
3626 rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
3627
3628 return ret < 0 ? ret : size;
3629 }
3630
3631 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3632 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3633 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3634 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3635 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3636 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3637 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3638 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3639 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3640 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3641 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3642
3643 static struct attribute *rbd_attrs[] = {
3644 &dev_attr_size.attr,
3645 &dev_attr_features.attr,
3646 &dev_attr_major.attr,
3647 &dev_attr_client_id.attr,
3648 &dev_attr_pool.attr,
3649 &dev_attr_pool_id.attr,
3650 &dev_attr_name.attr,
3651 &dev_attr_image_id.attr,
3652 &dev_attr_current_snap.attr,
3653 &dev_attr_parent.attr,
3654 &dev_attr_refresh.attr,
3655 NULL
3656 };
3657
3658 static struct attribute_group rbd_attr_group = {
3659 .attrs = rbd_attrs,
3660 };
3661
3662 static const struct attribute_group *rbd_attr_groups[] = {
3663 &rbd_attr_group,
3664 NULL
3665 };
3666
3667 static void rbd_sysfs_dev_release(struct device *dev)
3668 {
3669 }
3670
3671 static struct device_type rbd_device_type = {
3672 .name = "rbd",
3673 .groups = rbd_attr_groups,
3674 .release = rbd_sysfs_dev_release,
3675 };
3676
3677 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3678 {
3679 kref_get(&spec->kref);
3680
3681 return spec;
3682 }
3683
3684 static void rbd_spec_free(struct kref *kref);
3685 static void rbd_spec_put(struct rbd_spec *spec)
3686 {
3687 if (spec)
3688 kref_put(&spec->kref, rbd_spec_free);
3689 }
3690
3691 static struct rbd_spec *rbd_spec_alloc(void)
3692 {
3693 struct rbd_spec *spec;
3694
3695 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3696 if (!spec)
3697 return NULL;
3698 kref_init(&spec->kref);
3699
3700 return spec;
3701 }
3702
3703 static void rbd_spec_free(struct kref *kref)
3704 {
3705 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3706
3707 kfree(spec->pool_name);
3708 kfree(spec->image_id);
3709 kfree(spec->image_name);
3710 kfree(spec->snap_name);
3711 kfree(spec);
3712 }
3713
3714 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3715 struct rbd_spec *spec)
3716 {
3717 struct rbd_device *rbd_dev;
3718
3719 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3720 if (!rbd_dev)
3721 return NULL;
3722
3723 spin_lock_init(&rbd_dev->lock);
3724 rbd_dev->flags = 0;
3725 atomic_set(&rbd_dev->parent_ref, 0);
3726 INIT_LIST_HEAD(&rbd_dev->node);
3727 init_rwsem(&rbd_dev->header_rwsem);
3728
3729 rbd_dev->spec = spec;
3730 rbd_dev->rbd_client = rbdc;
3731
3732 /* Initialize the layout used for all rbd requests */
3733
3734 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3735 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3736 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3737 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3738
3739 return rbd_dev;
3740 }
3741
3742 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3743 {
3744 rbd_put_client(rbd_dev->rbd_client);
3745 rbd_spec_put(rbd_dev->spec);
3746 kfree(rbd_dev);
3747 }
3748
3749 /*
3750 * Get the size and object order for an image snapshot, or if
3751  * snap_id is CEPH_NOSNAP, get this information for the base
3752  * image.
3753 */
3754 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3755 u8 *order, u64 *snap_size)
3756 {
3757 __le64 snapid = cpu_to_le64(snap_id);
3758 int ret;
3759 struct {
3760 u8 order;
3761 __le64 size;
3762 } __attribute__ ((packed)) size_buf = { 0 };
3763
3764 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3765 "rbd", "get_size",
3766 &snapid, sizeof (snapid),
3767 &size_buf, sizeof (size_buf));
3768 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3769 if (ret < 0)
3770 return ret;
3771 if (ret < sizeof (size_buf))
3772 return -ERANGE;
3773
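/* The object order is optional; callers that only want the size pass a NULL order pointer. */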
3774 if (order) {
3775 *order = size_buf.order;
3776 dout(" order %u", (unsigned int)*order);
3777 }
3778 *snap_size = le64_to_cpu(size_buf.size);
3779
3780 dout(" snap_id 0x%016llx snap_size = %llu\n",
3781 (unsigned long long)snap_id,
3782 (unsigned long long)*snap_size);
3783
3784 return 0;
3785 }
3786
3787 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3788 {
3789 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3790 &rbd_dev->header.obj_order,
3791 &rbd_dev->header.image_size);
3792 }
3793
3794 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3795 {
3796 void *reply_buf;
3797 int ret;
3798 void *p;
3799
3800 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3801 if (!reply_buf)
3802 return -ENOMEM;
3803
3804 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3805 "rbd", "get_object_prefix", NULL, 0,
3806 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3807 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3808 if (ret < 0)
3809 goto out;
3810
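/* The reply is a ceph-encoded (length-prefixed) string; extract a NUL-terminated copy of it. */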
3811 p = reply_buf;
3812 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3813 p + ret, NULL, GFP_NOIO);
3814 ret = 0;
3815
3816 if (IS_ERR(rbd_dev->header.object_prefix)) {
3817 ret = PTR_ERR(rbd_dev->header.object_prefix);
3818 rbd_dev->header.object_prefix = NULL;
3819 } else {
3820 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
3821 }
3822 out:
3823 kfree(reply_buf);
3824
3825 return ret;
3826 }
3827
3828 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3829 u64 *snap_features)
3830 {
3831 __le64 snapid = cpu_to_le64(snap_id);
3832 struct {
3833 __le64 features;
3834 __le64 incompat;
3835 } __attribute__ ((packed)) features_buf = { 0 };
3836 u64 incompat;
3837 int ret;
3838
3839 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3840 "rbd", "get_features",
3841 &snapid, sizeof (snapid),
3842 &features_buf, sizeof (features_buf));
3843 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3844 if (ret < 0)
3845 return ret;
3846 if (ret < sizeof (features_buf))
3847 return -ERANGE;
3848
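/* Refuse the image if it requires features this client does not support. */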
3849 incompat = le64_to_cpu(features_buf.incompat);
3850 if (incompat & ~RBD_FEATURES_SUPPORTED)
3851 return -ENXIO;
3852
3853 *snap_features = le64_to_cpu(features_buf.features);
3854
3855 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3856 (unsigned long long)snap_id,
3857 (unsigned long long)*snap_features,
3858 (unsigned long long)le64_to_cpu(features_buf.incompat));
3859
3860 return 0;
3861 }
3862
3863 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3864 {
3865 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3866 &rbd_dev->header.features);
3867 }
3868
3869 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3870 {
3871 struct rbd_spec *parent_spec;
3872 size_t size;
3873 void *reply_buf = NULL;
3874 __le64 snapid;
3875 void *p;
3876 void *end;
3877 u64 pool_id;
3878 char *image_id;
3879 u64 overlap;
3880 int ret;
3881
3882 parent_spec = rbd_spec_alloc();
3883 if (!parent_spec)
3884 return -ENOMEM;
3885
3886 size = sizeof (__le64) + /* pool_id */
3887 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
3888 sizeof (__le64) + /* snap_id */
3889 sizeof (__le64); /* overlap */
3890 reply_buf = kmalloc(size, GFP_KERNEL);
3891 if (!reply_buf) {
3892 ret = -ENOMEM;
3893 goto out_err;
3894 }
3895
3896 snapid = cpu_to_le64(CEPH_NOSNAP);
3897 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3898 "rbd", "get_parent",
3899 &snapid, sizeof (snapid),
3900 reply_buf, size);
3901 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3902 if (ret < 0)
3903 goto out_err;
3904
3905 p = reply_buf;
3906 end = reply_buf + ret;
3907 ret = -ERANGE;
3908 ceph_decode_64_safe(&p, end, pool_id, out_err);
3909 if (pool_id == CEPH_NOPOOL) {
3910 /*
3911 * Either the parent never existed, or we have
3912 * a record of it but the image got flattened so it no
3913 * longer has a parent. When the parent of a
3914 * layered image disappears we immediately set the
3915 * overlap to 0. The effect of this is that all new
3916 * requests will be treated as if the image had no
3917 * parent.
3918 */
3919 if (rbd_dev->parent_overlap) {
3920 rbd_dev->parent_overlap = 0;
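/* Make the zeroed overlap visible before the parent reference is dropped. */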
3921 smp_mb();
3922 rbd_dev_parent_put(rbd_dev);
3923 pr_info("%s: clone image has been flattened\n",
3924 rbd_dev->disk->disk_name);
3925 }
3926
3927 goto out; /* No parent? No problem. */
3928 }
3929
3930 /* The ceph file layout needs to fit pool id in 32 bits */
3931
3932 ret = -EIO;
3933 if (pool_id > (u64)U32_MAX) {
3934 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3935 (unsigned long long)pool_id, U32_MAX);
3936 goto out_err;
3937 }
3938 parent_spec->pool_id = pool_id;
3939
3940 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3941 if (IS_ERR(image_id)) {
3942 ret = PTR_ERR(image_id);
3943 goto out_err;
3944 }
3945 parent_spec->image_id = image_id;
3946 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
3947 ceph_decode_64_safe(&p, end, overlap, out_err);
3948
3949 if (overlap) {
3950 rbd_spec_put(rbd_dev->parent_spec);
3951 rbd_dev->parent_spec = parent_spec;
3952 parent_spec = NULL; /* rbd_dev now owns this */
3953 rbd_dev->parent_overlap = overlap;
3954 } else {
3955 rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n");
3956 }
3957 out:
3958 ret = 0;
3959 out_err:
3960 kfree(reply_buf);
3961 rbd_spec_put(parent_spec);
3962
3963 return ret;
3964 }
3965
3966 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
3967 {
3968 struct {
3969 __le64 stripe_unit;
3970 __le64 stripe_count;
3971 } __attribute__ ((packed)) striping_info_buf = { 0 };
3972 size_t size = sizeof (striping_info_buf);
3973 void *p;
3974 u64 obj_size;
3975 u64 stripe_unit;
3976 u64 stripe_count;
3977 int ret;
3978
3979 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3980 "rbd", "get_stripe_unit_count", NULL, 0,
3981 (char *)&striping_info_buf, size);
3982 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3983 if (ret < 0)
3984 return ret;
3985 if (ret < size)
3986 return -ERANGE;
3987
3988 /*
3989 * We don't actually support the "fancy striping" feature
3990 * (STRIPINGV2) yet, but if the striping sizes are the
3991 * defaults the behavior is the same as before. So find
3992 * out, and only fail if the image has non-default values.
3993 */
3994 ret = -EINVAL;
3995 obj_size = (u64)1 << rbd_dev->header.obj_order;
3996 p = &striping_info_buf;
3997 stripe_unit = ceph_decode_64(&p);
3998 if (stripe_unit != obj_size) {
3999 rbd_warn(rbd_dev, "unsupported stripe unit "
4000 "(got %llu want %llu)",
4001 stripe_unit, obj_size);
4002 return -EINVAL;
4003 }
4004 stripe_count = ceph_decode_64(&p);
4005 if (stripe_count != 1) {
4006 rbd_warn(rbd_dev, "unsupported stripe count "
4007 "(got %llu want 1)", stripe_count);
4008 return -EINVAL;
4009 }
4010 rbd_dev->header.stripe_unit = stripe_unit;
4011 rbd_dev->header.stripe_count = stripe_count;
4012
4013 return 0;
4014 }
4015
4016 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4017 {
4018 size_t image_id_size;
4019 char *image_id;
4020 void *p;
4021 void *end;
4022 size_t size;
4023 void *reply_buf = NULL;
4024 size_t len = 0;
4025 char *image_name = NULL;
4026 int ret;
4027
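/*
 * Ask the rbd directory object to translate the image id back into an
 * image name. Returns NULL on any failure; callers treat the name as
 * optional.
 */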
4028 rbd_assert(!rbd_dev->spec->image_name);
4029
4030 len = strlen(rbd_dev->spec->image_id);
4031 image_id_size = sizeof (__le32) + len;
4032 image_id = kmalloc(image_id_size, GFP_KERNEL);
4033 if (!image_id)
4034 return NULL;
4035
4036 p = image_id;
4037 end = image_id + image_id_size;
4038 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4039
4040 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4041 reply_buf = kmalloc(size, GFP_KERNEL);
4042 if (!reply_buf)
4043 goto out;
4044
4045 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4046 "rbd", "dir_get_name",
4047 image_id, image_id_size,
4048 reply_buf, size);
4049 if (ret < 0)
4050 goto out;
4051 p = reply_buf;
4052 end = reply_buf + ret;
4053
4054 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4055 if (IS_ERR(image_name))
4056 image_name = NULL;
4057 else
4058 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4059 out:
4060 kfree(reply_buf);
4061 kfree(image_id);
4062
4063 return image_name;
4064 }
4065
4066 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4067 {
4068 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4069 const char *snap_name;
4070 u32 which = 0;
4071
4072 /* Skip over names until we find the one we are looking for */
4073
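/*
 * Format 1 snapshot names are stored as consecutive NUL-terminated
 * strings, in the same order as the ids in the snapshot context.
 */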
4074 snap_name = rbd_dev->header.snap_names;
4075 while (which < snapc->num_snaps) {
4076 if (!strcmp(name, snap_name))
4077 return snapc->snaps[which];
4078 snap_name += strlen(snap_name) + 1;
4079 which++;
4080 }
4081 return CEPH_NOSNAP;
4082 }
4083
4084 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4085 {
4086 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4087 u32 which;
4088 bool found = false;
4089 u64 snap_id;
4090
4091 for (which = 0; !found && which < snapc->num_snaps; which++) {
4092 const char *snap_name;
4093
4094 snap_id = snapc->snaps[which];
4095 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4096 if (IS_ERR(snap_name)) {
4097 /* ignore no-longer existing snapshots */
4098 if (PTR_ERR(snap_name) == -ENOENT)
4099 continue;
4100 else
4101 break;
4102 }
4103 found = !strcmp(name, snap_name);
4104 kfree(snap_name);
4105 }
4106 return found ? snap_id : CEPH_NOSNAP;
4107 }
4108
4109 /*
4110 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4111 * no snapshot by that name is found, or if an error occurs.
4112 */
4113 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4114 {
4115 if (rbd_dev->image_format == 1)
4116 return rbd_v1_snap_id_by_name(rbd_dev, name);
4117
4118 return rbd_v2_snap_id_by_name(rbd_dev, name);
4119 }
4120
4121 /*
4122 * When an rbd image has a parent image, it is identified by the
4123 * pool, image, and snapshot ids (not names). This function fills
4124 * in the names for those ids. (It's OK if we can't figure out the
4125 * name for an image id, but the pool and snapshot ids should always
4126 * exist and have names.) All names in an rbd spec are dynamically
4127 * allocated.
4128 *
4129 * When an image being mapped (not a parent) is probed, we have the
4130 * pool name and pool id, image name and image id, and the snapshot
4131 * name. The only thing we're missing is the snapshot id.
4132 */
4133 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4134 {
4135 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4136 struct rbd_spec *spec = rbd_dev->spec;
4137 const char *pool_name;
4138 const char *image_name;
4139 const char *snap_name;
4140 int ret;
4141
4142 /*
4143 * An image being mapped will have the pool name (etc.), but
4144 * we need to look up the snapshot id.
4145 */
4146 if (spec->pool_name) {
4147 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4148 u64 snap_id;
4149
4150 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4151 if (snap_id == CEPH_NOSNAP)
4152 return -ENOENT;
4153 spec->snap_id = snap_id;
4154 } else {
4155 spec->snap_id = CEPH_NOSNAP;
4156 }
4157
4158 return 0;
4159 }
4160
4161 /* Get the pool name; we have to make our own copy of this */
4162
4163 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4164 if (!pool_name) {
4165 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4166 return -EIO;
4167 }
4168 pool_name = kstrdup(pool_name, GFP_KERNEL);
4169 if (!pool_name)
4170 return -ENOMEM;
4171
4172 /* Fetch the image name; tolerate failure here */
4173
4174 image_name = rbd_dev_image_name(rbd_dev);
4175 if (!image_name)
4176 rbd_warn(rbd_dev, "unable to get image name");
4177
4178 /* Look up the snapshot name, and make a copy */
4179
4180 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4181 if (IS_ERR(snap_name)) {
4182 ret = PTR_ERR(snap_name);
4183 goto out_err;
4184 }
4185
4186 spec->pool_name = pool_name;
4187 spec->image_name = image_name;
4188 spec->snap_name = snap_name;
4189
4190 return 0;
4191 out_err:
4192 kfree(image_name);
4193 kfree(pool_name);
4194
4195 return ret;
4196 }
4197
4198 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4199 {
4200 size_t size;
4201 int ret;
4202 void *reply_buf;
4203 void *p;
4204 void *end;
4205 u64 seq;
4206 u32 snap_count;
4207 struct ceph_snap_context *snapc;
4208 u32 i;
4209
4210 /*
4211 * We'll need room for the seq value (maximum snapshot id),
4212 * snapshot count, and array of that many snapshot ids.
4213 * For now we have a fixed upper limit on the number we're
4214 * prepared to receive.
4215 */
4216 size = sizeof (__le64) + sizeof (__le32) +
4217 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4218 reply_buf = kzalloc(size, GFP_KERNEL);
4219 if (!reply_buf)
4220 return -ENOMEM;
4221
4222 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4223 "rbd", "get_snapcontext", NULL, 0,
4224 reply_buf, size);
4225 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4226 if (ret < 0)
4227 goto out;
4228
4229 p = reply_buf;
4230 end = reply_buf + ret;
4231 ret = -ERANGE;
4232 ceph_decode_64_safe(&p, end, seq, out);
4233 ceph_decode_32_safe(&p, end, snap_count, out);
4234
4235 /*
4236 * Make sure the reported number of snapshot ids wouldn't go
4237 * beyond the end of our buffer. But before checking that,
4238 * make sure the computed size of the snapshot context we
4239 * allocate is representable in a size_t.
4240 */
4241 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4242 / sizeof (u64)) {
4243 ret = -EINVAL;
4244 goto out;
4245 }
4246 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4247 goto out;
4248 ret = 0;
4249
4250 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4251 if (!snapc) {
4252 ret = -ENOMEM;
4253 goto out;
4254 }
4255 snapc->seq = seq;
4256 for (i = 0; i < snap_count; i++)
4257 snapc->snaps[i] = ceph_decode_64(&p);
4258
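/* Swap in the new snapshot context, dropping our reference to the old one (if any). */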
4259 ceph_put_snap_context(rbd_dev->header.snapc);
4260 rbd_dev->header.snapc = snapc;
4261
4262 dout(" snap context seq = %llu, snap_count = %u\n",
4263 (unsigned long long)seq, (unsigned int)snap_count);
4264 out:
4265 kfree(reply_buf);
4266
4267 return ret;
4268 }
4269
4270 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4271 u64 snap_id)
4272 {
4273 size_t size;
4274 void *reply_buf;
4275 __le64 snapid;
4276 int ret;
4277 void *p;
4278 void *end;
4279 char *snap_name;
4280
4281 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4282 reply_buf = kmalloc(size, GFP_KERNEL);
4283 if (!reply_buf)
4284 return ERR_PTR(-ENOMEM);
4285
4286 snapid = cpu_to_le64(snap_id);
4287 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4288 "rbd", "get_snapshot_name",
4289 &snapid, sizeof (snapid),
4290 reply_buf, size);
4291 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4292 if (ret < 0) {
4293 snap_name = ERR_PTR(ret);
4294 goto out;
4295 }
4296
4297 p = reply_buf;
4298 end = reply_buf + ret;
4299 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4300 if (IS_ERR(snap_name))
4301 goto out;
4302
4303 dout(" snap_id 0x%016llx snap_name = %s\n",
4304 (unsigned long long)snap_id, snap_name);
4305 out:
4306 kfree(reply_buf);
4307
4308 return snap_name;
4309 }
4310
4311 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4312 {
4313 bool first_time = rbd_dev->header.object_prefix == NULL;
4314 int ret;
4315
4316 down_write(&rbd_dev->header_rwsem);
4317
4318 ret = rbd_dev_v2_image_size(rbd_dev);
4319 if (ret)
4320 goto out;
4321
4322 if (first_time) {
4323 ret = rbd_dev_v2_header_onetime(rbd_dev);
4324 if (ret)
4325 goto out;
4326 }
4327
4328 /*
4329 * If the image supports layering, get the parent info. We
4330 * need to probe the first time regardless. Thereafter we
4331 * only need to do so if there's a parent, to see if it has
4332 * disappeared due to the mapped image getting flattened.
4333 */
4334 if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4335 (first_time || rbd_dev->parent_spec)) {
4336 bool warn;
4337
4338 ret = rbd_dev_v2_parent_info(rbd_dev);
4339 if (ret)
4340 goto out;
4341
4342 /*
4343 * Print a warning if this is the initial probe and
4344 * the image has a parent. Don't print it if the
4345 * image now being probed is itself a parent. We
4346 * can tell at this point because we won't know its
4347 * pool name yet (just its pool id).
4348 */
4349 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4350 if (first_time && warn)
4351 rbd_warn(rbd_dev, "WARNING: kernel layering "
4352 "is EXPERIMENTAL!");
4353 }
4354
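/* Only a mapping of the image head tracks size changes; a snapshot mapping keeps its fixed size. */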
4355 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4356 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4357 rbd_dev->mapping.size = rbd_dev->header.image_size;
4358
4359 ret = rbd_dev_v2_snap_context(rbd_dev);
4360 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4361 out:
4362 up_write(&rbd_dev->header_rwsem);
4363
4364 return ret;
4365 }
4366
4367 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4368 {
4369 struct device *dev;
4370 int ret;
4371
4372 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
4373
4374 dev = &rbd_dev->dev;
4375 dev->bus = &rbd_bus_type;
4376 dev->type = &rbd_device_type;
4377 dev->parent = &rbd_root_dev;
4378 dev->release = rbd_dev_device_release;
4379 dev_set_name(dev, "%d", rbd_dev->dev_id);
4380 ret = device_register(dev);
4381
4382 mutex_unlock(&ctl_mutex);
4383
4384 return ret;
4385 }
4386
4387 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4388 {
4389 device_unregister(&rbd_dev->dev);
4390 }
4391
4392 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
4393
4394 /*
4395 * Get a unique rbd identifier for the given new rbd_dev, and add
4396 * the rbd_dev to the global list. The minimum rbd id is 1.
4397 */
4398 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
4399 {
4400 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
4401
4402 spin_lock(&rbd_dev_list_lock);
4403 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4404 spin_unlock(&rbd_dev_list_lock);
4405 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
4406 (unsigned long long) rbd_dev->dev_id);
4407 }
4408
4409 /*
4410 * Remove an rbd_dev from the global list, and record that its
4411 * identifier is no longer in use.
4412 */
4413 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4414 {
4415 struct list_head *tmp;
4416 int rbd_id = rbd_dev->dev_id;
4417 int max_id;
4418
4419 rbd_assert(rbd_id > 0);
4420
4421 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
4422 (unsigned long long) rbd_dev->dev_id);
4423 spin_lock(&rbd_dev_list_lock);
4424 list_del_init(&rbd_dev->node);
4425
4426 /*
4427 * If the id being "put" is not the current maximum, there
4428 * is nothing special we need to do.
4429 */
4430 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
4431 spin_unlock(&rbd_dev_list_lock);
4432 return;
4433 }
4434
4435 /*
4436 * We need to update the current maximum id. Search the
4437 * list to find out what it is. We're more likely to find
4438 * the maximum at the end, so search the list backward.
4439 */
4440 max_id = 0;
4441 list_for_each_prev(tmp, &rbd_dev_list) {
4442 struct rbd_device *rbd_dev;
4443
4444 rbd_dev = list_entry(tmp, struct rbd_device, node);
4445 if (rbd_dev->dev_id > max_id)
4446 max_id = rbd_dev->dev_id;
4447 }
4448 spin_unlock(&rbd_dev_list_lock);
4449
4450 /*
4451 * The max id could have been updated by rbd_dev_id_get(), in
4452 * which case it now accurately reflects the new maximum.
4453 * Be careful not to overwrite the maximum value in that
4454 * case.
4455 */
4456 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
4457 dout(" max dev id has been reset\n");
4458 }
4459
4460 /*
4461 * Skips over white space at *buf, and updates *buf to point to the
4462 * first found non-space character (if any). Returns the length of
4463 * the token (string of non-white space characters) found. Note
4464 * that *buf must be terminated with '\0'.
4465 */
4466 static inline size_t next_token(const char **buf)
4467 {
4468 /*
4469 * These are the characters that produce nonzero for
4470 * isspace() in the "C" and "POSIX" locales.
4471 */
4472 const char *spaces = " \f\n\r\t\v";
4473
4474 *buf += strspn(*buf, spaces); /* Find start of token */
4475
4476 return strcspn(*buf, spaces); /* Return token length */
4477 }
4478
4479 /*
4480 * Finds the next token in *buf, and if the provided token buffer is
4481 * big enough, copies the found token into it. The result, if
4482 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4483 * must be terminated with '\0' on entry.
4484 *
4485 * Returns the length of the token found (not including the '\0').
4486 * Return value will be 0 if no token is found, and it will be >=
4487 * token_size if the token would not fit.
4488 *
4489 * The *buf pointer will be updated to point beyond the end of the
4490 * found token. Note that this occurs even if the token buffer is
4491 * too small to hold it.
4492 */
4493 static inline size_t copy_token(const char **buf,
4494 char *token,
4495 size_t token_size)
4496 {
4497 size_t len;
4498
4499 len = next_token(buf);
4500 if (len < token_size) {
4501 memcpy(token, *buf, len);
4502 *(token + len) = '\0';
4503 }
4504 *buf += len;
4505
4506 return len;
4507 }
4508
4509 /*
4510 * Finds the next token in *buf, dynamically allocates a buffer big
4511 * enough to hold a copy of it, and copies the token into the new
4512 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4513 * that a duplicate buffer is created even for a zero-length token.
4514 *
4515 * Returns a pointer to the newly-allocated duplicate, or a null
4516 * pointer if memory for the duplicate was not available. If
4517 * the lenp argument is a non-null pointer, the length of the token
4518 * (not including the '\0') is returned in *lenp.
4519 *
4520 * If successful, the *buf pointer will be updated to point beyond
4521 * the end of the found token.
4522 *
4523 * Note: uses GFP_KERNEL for allocation.
4524 */
4525 static inline char *dup_token(const char **buf, size_t *lenp)
4526 {
4527 char *dup;
4528 size_t len;
4529
4530 len = next_token(buf);
4531 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4532 if (!dup)
4533 return NULL;
4534 *(dup + len) = '\0';
4535 *buf += len;
4536
4537 if (lenp)
4538 *lenp = len;
4539
4540 return dup;
4541 }
4542
4543 /*
4544 * Parse the options provided for an "rbd add" (i.e., rbd image
4545 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4546 * and the data written is passed here via a NUL-terminated buffer.
4547 * Returns 0 if successful or an error code otherwise.
4548 *
4549 * The information extracted from these options is recorded in
4550 * the other parameters which return dynamically-allocated
4551 * structures:
4552 * ceph_opts
4553 * The address of a pointer that will refer to a ceph options
4554 * structure. Caller must release the returned pointer using
4555 * ceph_destroy_options() when it is no longer needed.
4556 * rbd_opts
4557 * Address of an rbd options pointer. Fully initialized by
4558 * this function; caller must release with kfree().
4559 * spec
4560 * Address of an rbd image specification pointer. Fully
4561 * initialized by this function based on parsed options.
4562 * Caller must release with rbd_spec_put().
4563 *
4564 * The options passed take this form:
4565 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4566 * where:
4567 * <mon_addrs>
4568 * A comma-separated list of one or more monitor addresses.
4569 * A monitor address is an ip address, optionally followed
4570 * by a port number (separated by a colon).
4571 * I.e.: ip1[:port1][,ip2[:port2]...]
4572 * <options>
4573 * A comma-separated list of ceph and/or rbd options.
4574 * <pool_name>
4575 * The name of the rados pool containing the rbd image.
4576 * <image_name>
4577 * The name of the image in that pool to map.
4578 * <snap_name>
4579 * The name of an optional snapshot. If provided, the mapping
4580 * will present data from the image at the time that snapshot
4581 * was created. The image head is used if no snapshot name is
4582 * provided. Snapshot mappings are always read-only.
4583 */
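/*
 * For example (monitor address, credentials, and names below are
 * illustrative only):
 *
 *   echo "1.2.3.4:6789 name=admin,secret=<key> mypool myimage" \
 *       > /sys/bus/rbd/add
 */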
4584 static int rbd_add_parse_args(const char *buf,
4585 struct ceph_options **ceph_opts,
4586 struct rbd_options **opts,
4587 struct rbd_spec **rbd_spec)
4588 {
4589 size_t len;
4590 char *options;
4591 const char *mon_addrs;
4592 char *snap_name;
4593 size_t mon_addrs_size;
4594 struct rbd_spec *spec = NULL;
4595 struct rbd_options *rbd_opts = NULL;
4596 struct ceph_options *copts;
4597 int ret;
4598
4599 /* The first four tokens are required */
4600
4601 len = next_token(&buf);
4602 if (!len) {
4603 rbd_warn(NULL, "no monitor address(es) provided");
4604 return -EINVAL;
4605 }
4606 mon_addrs = buf;
4607 mon_addrs_size = len + 1;
4608 buf += len;
4609
4610 ret = -EINVAL;
4611 options = dup_token(&buf, NULL);
4612 if (!options)
4613 return -ENOMEM;
4614 if (!*options) {
4615 rbd_warn(NULL, "no options provided");
4616 goto out_err;
4617 }
4618
4619 spec = rbd_spec_alloc();
4620 if (!spec)
4621 goto out_mem;
4622
4623 spec->pool_name = dup_token(&buf, NULL);
4624 if (!spec->pool_name)
4625 goto out_mem;
4626 if (!*spec->pool_name) {
4627 rbd_warn(NULL, "no pool name provided");
4628 goto out_err;
4629 }
4630
4631 spec->image_name = dup_token(&buf, NULL);
4632 if (!spec->image_name)
4633 goto out_mem;
4634 if (!*spec->image_name) {
4635 rbd_warn(NULL, "no image name provided");
4636 goto out_err;
4637 }
4638
4639 /*
4640 * Snapshot name is optional; default is to use "-"
4641 * (indicating the head/no snapshot).
4642 */
4643 len = next_token(&buf);
4644 if (!len) {
4645 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4646 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4647 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4648 ret = -ENAMETOOLONG;
4649 goto out_err;
4650 }
4651 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4652 if (!snap_name)
4653 goto out_mem;
4654 *(snap_name + len) = '\0';
4655 spec->snap_name = snap_name;
4656
4657 /* Initialize all rbd options to the defaults */
4658
4659 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4660 if (!rbd_opts)
4661 goto out_mem;
4662
4663 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4664
4665 copts = ceph_parse_options(options, mon_addrs,
4666 mon_addrs + mon_addrs_size - 1,
4667 parse_rbd_opts_token, rbd_opts);
4668 if (IS_ERR(copts)) {
4669 ret = PTR_ERR(copts);
4670 goto out_err;
4671 }
4672 kfree(options);
4673
4674 *ceph_opts = copts;
4675 *opts = rbd_opts;
4676 *rbd_spec = spec;
4677
4678 return 0;
4679 out_mem:
4680 ret = -ENOMEM;
4681 out_err:
4682 kfree(rbd_opts);
4683 rbd_spec_put(spec);
4684 kfree(options);
4685
4686 return ret;
4687 }
4688
4689 /*
4690 * An rbd format 2 image has a unique identifier, distinct from the
4691 * name given to it by the user. Internally, that identifier is
4692 * what's used to specify the names of objects related to the image.
4693 *
4694 * A special "rbd id" object is used to map an rbd image name to its
4695 * id. If that object doesn't exist, then there is no v2 rbd image
4696 * with the supplied name.
4697 *
4698 * This function will record the given rbd_dev's image_id field if
4699 * it can be determined, and in that case will return 0. If any
4700 * errors occur a negative errno will be returned and the rbd_dev's
4701 * image_id field will be unchanged (and should be NULL).
4702 */
4703 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
4704 {
4705 int ret;
4706 size_t size;
4707 char *object_name;
4708 void *response;
4709 char *image_id;
4710
4711 /*
4712 * When probing a parent image, the image id is already
4713 * known (and the image name likely is not). There's no
4714 * need to fetch the image id again in this case. We
4715 * do still need to set the image format though.
4716 */
4717 if (rbd_dev->spec->image_id) {
4718 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
4719
4720 return 0;
4721 }
4722
4723 /*
4724 * First, see if the format 2 image id file exists, and if
4725 * so, get the image's persistent id from it.
4726 */
4727 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
4728 object_name = kmalloc(size, GFP_NOIO);
4729 if (!object_name)
4730 return -ENOMEM;
4731 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
4732 dout("rbd id object name is %s\n", object_name);
4733
4734 /* Response will be an encoded string, which includes a length */
4735
4736 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
4737 response = kzalloc(size, GFP_NOIO);
4738 if (!response) {
4739 ret = -ENOMEM;
4740 goto out;
4741 }
4742
4743 /* If it doesn't exist we'll assume it's a format 1 image */
4744
4745 ret = rbd_obj_method_sync(rbd_dev, object_name,
4746 "rbd", "get_id", NULL, 0,
4747 response, RBD_IMAGE_ID_LEN_MAX);
4748 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4749 if (ret == -ENOENT) {
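/* No id object: this is a format 1 image. An empty image id string records that fact. */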
4750 image_id = kstrdup("", GFP_KERNEL);
4751 ret = image_id ? 0 : -ENOMEM;
4752 if (!ret)
4753 rbd_dev->image_format = 1;
4754 } else if (ret > sizeof (__le32)) {
4755 void *p = response;
4756
4757 image_id = ceph_extract_encoded_string(&p, p + ret,
4758 NULL, GFP_NOIO);
4759 ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
4760 if (!ret)
4761 rbd_dev->image_format = 2;
4762 } else {
4763 ret = -EINVAL;
4764 }
4765
4766 if (!ret) {
4767 rbd_dev->spec->image_id = image_id;
4768 dout("image_id is %s\n", image_id);
4769 }
4770 out:
4771 kfree(response);
4772 kfree(object_name);
4773
4774 return ret;
4775 }
4776
4777 /*
4778 * Undo whatever state changes are made by a v1 or v2 header info
4779 * call.
4780 */
4781 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4782 {
4783 struct rbd_image_header *header;
4784
4785 /* Drop parent reference unless it's already been done (or none) */
4786
4787 if (rbd_dev->parent_overlap)
4788 rbd_dev_parent_put(rbd_dev);
4789
4790 /* Free dynamic fields from the header, then zero it out */
4791
4792 header = &rbd_dev->header;
4793 ceph_put_snap_context(header->snapc);
4794 kfree(header->snap_sizes);
4795 kfree(header->snap_names);
4796 kfree(header->object_prefix);
4797 memset(header, 0, sizeof (*header));
4798 }
4799
4800 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
4801 {
4802 int ret;
4803
4804 ret = rbd_dev_v2_object_prefix(rbd_dev);
4805 if (ret)
4806 goto out_err;
4807
4808 /*
4809 * Get and check the features for the image. Currently the
4810 * features are assumed to never change.
4811 */
4812 ret = rbd_dev_v2_features(rbd_dev);
4813 if (ret)
4814 goto out_err;
4815
4816 /* If the image supports fancy striping, get its parameters */
4817
4818 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
4819 ret = rbd_dev_v2_striping_info(rbd_dev);
4820 if (ret < 0)
4821 goto out_err;
4822 }
4823 /* No support for crypto and compression type format 2 images */
4824
4825 return 0;
4826 out_err:
4827 rbd_dev->header.features = 0;
4828 kfree(rbd_dev->header.object_prefix);
4829 rbd_dev->header.object_prefix = NULL;
4830
4831 return ret;
4832 }
4833
4834 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
4835 {
4836 struct rbd_device *parent = NULL;
4837 struct rbd_spec *parent_spec;
4838 struct rbd_client *rbdc;
4839 int ret;
4840
4841 if (!rbd_dev->parent_spec)
4842 return 0;
4843 /*
4844 * We need to pass a reference to the client and the parent
4845 * spec when creating the parent rbd_dev. Images related by
4846 * parent/child relationships always share both.
4847 */
4848 parent_spec = rbd_spec_get(rbd_dev->parent_spec);
4849 rbdc = __rbd_get_client(rbd_dev->rbd_client);
4850
4851 ret = -ENOMEM;
4852 parent = rbd_dev_create(rbdc, parent_spec);
4853 if (!parent)
4854 goto out_err;
4855
4856 ret = rbd_dev_image_probe(parent, false);
4857 if (ret < 0)
4858 goto out_err;
4859 rbd_dev->parent = parent;
4860 atomic_set(&rbd_dev->parent_ref, 1);
4861
4862 return 0;
4863 out_err:
4864 if (parent) {
4865 rbd_dev_unparent(rbd_dev);
4866 rbd_dev_destroy(parent);
4867 } else {
4868 rbd_put_client(rbdc);
4869 rbd_spec_put(parent_spec);
4870 }
4871
4872 return ret;
4873 }
4874
4875 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
4876 {
4877 int ret;
4878
4879 /* generate unique id: find highest unique id, add one */
4880 rbd_dev_id_get(rbd_dev);
4881
4882 /* Fill in the device name, now that we have its id. */
4883 BUILD_BUG_ON(DEV_NAME_LEN
4884 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
4885 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
4886
4887 /* Get our block major device number. */
4888
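/* A major of 0 asks the block layer to allocate an unused major dynamically; register_blkdev() returns it on success. */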
4889 ret = register_blkdev(0, rbd_dev->name);
4890 if (ret < 0)
4891 goto err_out_id;
4892 rbd_dev->major = ret;
4893
4894 /* Set up the blkdev mapping. */
4895
4896 ret = rbd_init_disk(rbd_dev);
4897 if (ret)
4898 goto err_out_blkdev;
4899
4900 ret = rbd_dev_mapping_set(rbd_dev);
4901 if (ret)
4902 goto err_out_disk;
4903 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
4904
4905 ret = rbd_bus_add_dev(rbd_dev);
4906 if (ret)
4907 goto err_out_mapping;
4908
4909 /* Everything's ready. Announce the disk to the world. */
4910
4911 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4912 add_disk(rbd_dev->disk);
4913
4914 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
4915 (unsigned long long) rbd_dev->mapping.size);
4916
4917 return ret;
4918
4919 err_out_mapping:
4920 rbd_dev_mapping_clear(rbd_dev);
4921 err_out_disk:
4922 rbd_free_disk(rbd_dev);
4923 err_out_blkdev:
4924 unregister_blkdev(rbd_dev->major, rbd_dev->name);
4925 err_out_id:
4926 rbd_dev_id_put(rbd_dev);
4927 rbd_dev_mapping_clear(rbd_dev);
4928
4929 return ret;
4930 }
4931
4932 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
4933 {
4934 struct rbd_spec *spec = rbd_dev->spec;
4935 size_t size;
4936
4937 /* Record the header object name for this rbd image. */
4938
4939 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4940
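/*
 * Format 1 images name the header object after the image name with
 * RBD_SUFFIX appended; format 2 images use RBD_HEADER_PREFIX followed
 * by the image id.
 */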
4941 if (rbd_dev->image_format == 1)
4942 size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
4943 else
4944 size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
4945
4946 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
4947 if (!rbd_dev->header_name)
4948 return -ENOMEM;
4949
4950 if (rbd_dev->image_format == 1)
4951 sprintf(rbd_dev->header_name, "%s%s",
4952 spec->image_name, RBD_SUFFIX);
4953 else
4954 sprintf(rbd_dev->header_name, "%s%s",
4955 RBD_HEADER_PREFIX, spec->image_id);
4956 return 0;
4957 }
4958
4959 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
4960 {
4961 rbd_dev_unprobe(rbd_dev);
4962 kfree(rbd_dev->header_name);
4963 rbd_dev->header_name = NULL;
4964 rbd_dev->image_format = 0;
4965 kfree(rbd_dev->spec->image_id);
4966 rbd_dev->spec->image_id = NULL;
4967
4968 rbd_dev_destroy(rbd_dev);
4969 }
4970
4971 /*
4972 * Probe for the existence of the header object for the given rbd
4973 * device. If this image is the one being mapped (i.e., not a
4974 * parent), initiate a watch on its header object before using that
4975 * object to get detailed information about the rbd image.
4976 */
4977 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
4978 {
4979 int ret;
4980 int tmp;
4981
4982 /*
4983 * Get the id from the image id object. Unless there's an
4984 * error, rbd_dev->spec->image_id will be filled in with
4985 * a dynamically-allocated string, and rbd_dev->image_format
4986 * will be set to either 1 or 2.
4987 */
4988 ret = rbd_dev_image_id(rbd_dev);
4989 if (ret)
4990 return ret;
4991 rbd_assert(rbd_dev->spec->image_id);
4992 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4993
4994 ret = rbd_dev_header_name(rbd_dev);
4995 if (ret)
4996 goto err_out_format;
4997
4998 if (mapping) {
4999 ret = rbd_dev_header_watch_sync(rbd_dev, true);
5000 if (ret)
5001 goto out_header_name;
5002 }
5003
5004 if (rbd_dev->image_format == 1)
5005 ret = rbd_dev_v1_header_info(rbd_dev);
5006 else
5007 ret = rbd_dev_v2_header_info(rbd_dev);
5008 if (ret)
5009 goto err_out_watch;
5010
5011 ret = rbd_dev_spec_update(rbd_dev);
5012 if (ret)
5013 goto err_out_probe;
5014
5015 ret = rbd_dev_probe_parent(rbd_dev);
5016 if (ret)
5017 goto err_out_probe;
5018
5019 dout("discovered format %u image, header name is %s\n",
5020 rbd_dev->image_format, rbd_dev->header_name);
5021
5022 return 0;
5023 err_out_probe:
5024 rbd_dev_unprobe(rbd_dev);
5025 err_out_watch:
5026 if (mapping) {
5027 tmp = rbd_dev_header_watch_sync(rbd_dev, false);
5028 if (tmp)
5029 rbd_warn(rbd_dev, "unable to tear down "
5030 "watch request (%d)\n", tmp);
5031 }
5032 out_header_name:
5033 kfree(rbd_dev->header_name);
5034 rbd_dev->header_name = NULL;
5035 err_out_format:
5036 rbd_dev->image_format = 0;
5037 kfree(rbd_dev->spec->image_id);
5038 rbd_dev->spec->image_id = NULL;
5039
5040 dout("probe failed, returning %d\n", ret);
5041
5042 return ret;
5043 }
5044
5045 static ssize_t rbd_add(struct bus_type *bus,
5046 const char *buf,
5047 size_t count)
5048 {
5049 struct rbd_device *rbd_dev = NULL;
5050 struct ceph_options *ceph_opts = NULL;
5051 struct rbd_options *rbd_opts = NULL;
5052 struct rbd_spec *spec = NULL;
5053 struct rbd_client *rbdc;
5054 struct ceph_osd_client *osdc;
5055 bool read_only;
5056 int rc = -ENOMEM;
5057
5058 if (!try_module_get(THIS_MODULE))
5059 return -ENODEV;
5060
5061 /* parse add command */
5062 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
5063 if (rc < 0)
5064 goto err_out_module;
5065 read_only = rbd_opts->read_only;
5066 kfree(rbd_opts);
5067 rbd_opts = NULL; /* done with this */
5068
5069 rbdc = rbd_get_client(ceph_opts);
5070 if (IS_ERR(rbdc)) {
5071 rc = PTR_ERR(rbdc);
5072 goto err_out_args;
5073 }
5074
5075 /* pick the pool */
5076 osdc = &rbdc->client->osdc;
5077 rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
5078 if (rc < 0)
5079 goto err_out_client;
5080 spec->pool_id = (u64)rc;
5081
5082 /* The ceph file layout needs to fit pool id in 32 bits */
5083
5084 if (spec->pool_id > (u64)U32_MAX) {
5085 rbd_warn(NULL, "pool id too large (%llu > %u)\n",
5086 (unsigned long long)spec->pool_id, U32_MAX);
5087 rc = -EIO;
5088 goto err_out_client;
5089 }
5090
5091 rbd_dev = rbd_dev_create(rbdc, spec);
5092 if (!rbd_dev)
5093 goto err_out_client;
5094 rbdc = NULL; /* rbd_dev now owns this */
5095 spec = NULL; /* rbd_dev now owns this */
5096
5097 rc = rbd_dev_image_probe(rbd_dev, true);
5098 if (rc < 0)
5099 goto err_out_rbd_dev;
5100
5101 /* If we are mapping a snapshot it must be marked read-only */
5102
5103 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5104 read_only = true;
5105 rbd_dev->mapping.read_only = read_only;
5106
5107 rc = rbd_dev_device_setup(rbd_dev);
5108 if (rc) {
5109 rbd_dev_image_release(rbd_dev);
5110 goto err_out_module;
5111 }
5112
5113 return count;
5114
5115 err_out_rbd_dev:
5116 rbd_dev_destroy(rbd_dev);
5117 err_out_client:
5118 rbd_put_client(rbdc);
5119 err_out_args:
5120 rbd_spec_put(spec);
5121 err_out_module:
5122 module_put(THIS_MODULE);
5123
5124 dout("Error adding device %s\n", buf);
5125
5126 return (ssize_t)rc;
5127 }
5128
5129 static void rbd_dev_device_release(struct device *dev)
5130 {
5131 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5132
5133 rbd_free_disk(rbd_dev);
5134 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5135 rbd_dev_mapping_clear(rbd_dev);
5136 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5137 rbd_dev->major = 0;
5138 rbd_dev_id_put(rbd_dev);
5139 rbd_dev_mapping_clear(rbd_dev);
5140 }
5141
5142 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5143 {
5144 while (rbd_dev->parent) {
5145 struct rbd_device *first = rbd_dev;
5146 struct rbd_device *second = first->parent;
5147 struct rbd_device *third;
5148
5149 /*
5150 * Follow to the parent with no grandparent and
5151 * remove it.
5152 */
5153 while (second && (third = second->parent)) {
5154 first = second;
5155 second = third;
5156 }
5157 rbd_assert(second);
5158 rbd_dev_image_release(second);
5159 first->parent = NULL;
5160 first->parent_overlap = 0;
5161
5162 rbd_assert(first->parent_spec);
5163 rbd_spec_put(first->parent_spec);
5164 first->parent_spec = NULL;
5165 }
5166 }
5167
5168 static ssize_t rbd_remove(struct bus_type *bus,
5169 const char *buf,
5170 size_t count)
5171 {
5172 struct rbd_device *rbd_dev = NULL;
5173 struct list_head *tmp;
5174 int dev_id;
5175 unsigned long ul;
5176 bool already = false;
5177 int ret;
5178
5179 ret = strict_strtoul(buf, 10, &ul);
5180 if (ret)
5181 return ret;
5182
5183 /* convert to int; abort if we lost anything in the conversion */
5184 dev_id = (int)ul;
5185 if (dev_id != ul)
5186 return -EINVAL;
5187
5188 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
5189
5190 ret = -ENOENT;
5191 spin_lock(&rbd_dev_list_lock);
5192 list_for_each(tmp, &rbd_dev_list) {
5193 rbd_dev = list_entry(tmp, struct rbd_device, node);
5194 if (rbd_dev->dev_id == dev_id) {
5195 ret = 0;
5196 break;
5197 }
5198 }
5199 if (!ret) {
5200 spin_lock_irq(&rbd_dev->lock);
5201 if (rbd_dev->open_count)
5202 ret = -EBUSY;
5203 else
5204 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
5205 &rbd_dev->flags);
5206 spin_unlock_irq(&rbd_dev->lock);
5207 }
5208 spin_unlock(&rbd_dev_list_lock);
5209 if (ret < 0 || already)
5210 goto done;
5211
5212 ret = rbd_dev_header_watch_sync(rbd_dev, false);
5213 if (ret)
5214 rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
5215
5216 /*
5217 * flush remaining watch callbacks - these must be complete
5218 * before the osd_client is shut down
5219 */
5220 dout("%s: flushing notifies", __func__);
5221 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
5222 /*
5223 * Don't free anything from rbd_dev->disk until after all
5224 * notifies are completely processed. Otherwise
5225 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
5226 * in a potential use after free of rbd_dev->disk or rbd_dev.
5227 */
5228 rbd_bus_del_dev(rbd_dev);
5229 rbd_dev_image_release(rbd_dev);
5230 module_put(THIS_MODULE);
5231 ret = count;
5232 done:
5233 mutex_unlock(&ctl_mutex);
5234
5235 return ret;
5236 }
5237
5238 /*
5239 * create control files in sysfs
5240 * /sys/bus/rbd/...
5241 */
5242 static int rbd_sysfs_init(void)
5243 {
5244 int ret;
5245
5246 ret = device_register(&rbd_root_dev);
5247 if (ret < 0)
5248 return ret;
5249
5250 ret = bus_register(&rbd_bus_type);
5251 if (ret < 0)
5252 device_unregister(&rbd_root_dev);
5253
5254 return ret;
5255 }
5256
5257 static void rbd_sysfs_cleanup(void)
5258 {
5259 bus_unregister(&rbd_bus_type);
5260 device_unregister(&rbd_root_dev);
5261 }
5262
5263 static int rbd_slab_init(void)
5264 {
5265 rbd_assert(!rbd_img_request_cache);
5266 rbd_img_request_cache = kmem_cache_create("rbd_img_request",
5267 sizeof (struct rbd_img_request),
5268 __alignof__(struct rbd_img_request),
5269 0, NULL);
5270 if (!rbd_img_request_cache)
5271 return -ENOMEM;
5272
5273 rbd_assert(!rbd_obj_request_cache);
5274 rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
5275 sizeof (struct rbd_obj_request),
5276 __alignof__(struct rbd_obj_request),
5277 0, NULL);
5278 if (!rbd_obj_request_cache)
5279 goto out_err;
5280
5281 rbd_assert(!rbd_segment_name_cache);
5282 rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
5283 MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
5284 if (rbd_segment_name_cache)
5285 return 0;
5286 out_err:
5287 if (rbd_obj_request_cache) {
5288 kmem_cache_destroy(rbd_obj_request_cache);
5289 rbd_obj_request_cache = NULL;
5290 }
5291
5292 kmem_cache_destroy(rbd_img_request_cache);
5293 rbd_img_request_cache = NULL;
5294
5295 return -ENOMEM;
5296 }
5297
5298 static void rbd_slab_exit(void)
5299 {
5300 rbd_assert(rbd_segment_name_cache);
5301 kmem_cache_destroy(rbd_segment_name_cache);
5302 rbd_segment_name_cache = NULL;
5303
5304 rbd_assert(rbd_obj_request_cache);
5305 kmem_cache_destroy(rbd_obj_request_cache);
5306 rbd_obj_request_cache = NULL;
5307
5308 rbd_assert(rbd_img_request_cache);
5309 kmem_cache_destroy(rbd_img_request_cache);
5310 rbd_img_request_cache = NULL;
5311 }
5312
5313 static int __init rbd_init(void)
5314 {
5315 int rc;
5316
5317 if (!libceph_compatible(NULL)) {
5318 rbd_warn(NULL, "libceph incompatibility (quitting)");
5319
5320 return -EINVAL;
5321 }
5322 rc = rbd_slab_init();
5323 if (rc)
5324 return rc;
5325 rc = rbd_sysfs_init();
5326 if (rc)
5327 rbd_slab_exit();
5328 else
5329 pr_info("loaded " RBD_DRV_NAME_LONG "\n");
5330
5331 return rc;
5332 }
5333
5334 static void __exit rbd_exit(void)
5335 {
5336 rbd_sysfs_cleanup();
5337 rbd_slab_exit();
5338 }
5339
5340 module_init(rbd_init);
5341 module_exit(rbd_exit);
5342
5343 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
5344 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
5345 MODULE_DESCRIPTION("rados block device");
5346
5347 /* following authorship retained from original osdblk.c */
5348 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
5349
5350 MODULE_LICENSE("GPL");