#include "ceph_debug.h"

#include <linux/slab.h>

#include "crush/hash.h"
#include "crush/mapper.h"
char *ceph_osdmap_state_str(char *str, int len, int state)
{
        int flag = 0;

        if (!len)
                goto done;

        *str = '\0';
        if (state) {
                if (state & CEPH_OSD_EXISTS) {
                        snprintf(str, len, "exists");
                        flag = 1;
                }
                if (state & CEPH_OSD_UP) {
                        snprintf(str, len, "%s%s%s", str, (flag ? ", " : ""),
                                 "up");
                        flag = 1;
                }
        } else {
                snprintf(str, len, "doesn't exist");
        }
done:
        return str;
}
static int calc_bits_of(unsigned t)
/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
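 *
 * Worked example (illustrative numbers): pg_num = 12 gives
 * calc_bits_of(12 - 1) = calc_bits_of(11) = 4, so
 * pg_num_mask = (1 << 4) - 1 = 15, the smallest 2^n-1 >= 12.
 * calc_pg_raw() later feeds these masks to ceph_stable_mod() when
 * folding a placement seed into the pool's pg range.
 */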
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
        pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1;
        pi->pgp_num_mask =
                (1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1;
        pi->lpg_num_mask =
                (1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1;
        pi->lpgp_num_mask =
                (1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1;
}
static int crush_decode_uniform_bucket(void **p, void *end,
                                       struct crush_bucket_uniform *b)
{
        dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
        ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
        b->item_weight = ceph_decode_32(p);
        return 0;
bad:
        return -EINVAL;
}
static int crush_decode_list_bucket(void **p, void *end,
                                    struct crush_bucket_list *b)
{
        int j;

        dout("crush_decode_list_bucket %p to %p\n", *p, end);
        b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->item_weights == NULL)
                return -ENOMEM;
        b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->sum_weights == NULL)
                return -ENOMEM;
        ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
        for (j = 0; j < b->h.size; j++) {
                b->item_weights[j] = ceph_decode_32(p);
                b->sum_weights[j] = ceph_decode_32(p);
        }
        return 0;
bad:
        return -EINVAL;
}
static int crush_decode_tree_bucket(void **p, void *end,
                                    struct crush_bucket_tree *b)
{
        int j;

        dout("crush_decode_tree_bucket %p to %p\n", *p, end);
        ceph_decode_32_safe(p, end, b->num_nodes, bad);
        b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
        if (b->node_weights == NULL)
                return -ENOMEM;
        ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
        for (j = 0; j < b->num_nodes; j++)
                b->node_weights[j] = ceph_decode_32(p);
        return 0;
bad:
        return -EINVAL;
}
static int crush_decode_straw_bucket(void **p, void *end,
                                     struct crush_bucket_straw *b)
{
        int j;

        dout("crush_decode_straw_bucket %p to %p\n", *p, end);
        b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->item_weights == NULL)
                return -ENOMEM;
        b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->straws == NULL)
                return -ENOMEM;
        ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
        for (j = 0; j < b->h.size; j++) {
                b->item_weights[j] = ceph_decode_32(p);
                b->straws[j] = ceph_decode_32(p);
        }
        return 0;
bad:
        return -EINVAL;
}
static struct crush_map *crush_decode(void *pbyval, void *end)
{
        struct crush_map *c;
        int err = -EINVAL;
        int i, j;
        void **p = &pbyval;
        void *start = pbyval;
        u32 magic;

        dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

        c = kzalloc(sizeof(*c), GFP_NOFS);
        if (c == NULL)
                return ERR_PTR(-ENOMEM);

        ceph_decode_need(p, end, 4*sizeof(u32), bad);
        magic = ceph_decode_32(p);
        if (magic != CRUSH_MAGIC) {
                pr_err("crush_decode magic %x != current %x\n",
                       (unsigned)magic, (unsigned)CRUSH_MAGIC);
                goto bad;
        }
        c->max_buckets = ceph_decode_32(p);
        c->max_rules = ceph_decode_32(p);
        c->max_devices = ceph_decode_32(p);

        c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
        if (c->device_parents == NULL)
                goto badmem;
        c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS);
        if (c->bucket_parents == NULL)
                goto badmem;

        c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
        if (c->buckets == NULL)
                goto badmem;
        c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
        if (c->rules == NULL)
                goto badmem;

        /* buckets */
        for (i = 0; i < c->max_buckets; i++) {
                int size = 0;
                u32 alg;
                struct crush_bucket *b;

                ceph_decode_32_safe(p, end, alg, bad);
                if (alg == 0) {
                        c->buckets[i] = NULL;
                        continue;
                }
                dout("crush_decode bucket %d off %x %p to %p\n",
                     i, (int)(*p-start), *p, end);

                switch (alg) {
                case CRUSH_BUCKET_UNIFORM:
                        size = sizeof(struct crush_bucket_uniform);
                        break;
                case CRUSH_BUCKET_LIST:
                        size = sizeof(struct crush_bucket_list);
                        break;
                case CRUSH_BUCKET_TREE:
                        size = sizeof(struct crush_bucket_tree);
                        break;
                case CRUSH_BUCKET_STRAW:
                        size = sizeof(struct crush_bucket_straw);
                        break;
                default:
                        err = -EINVAL;
                        goto bad;
                }
                BUG_ON(size == 0);
                b = c->buckets[i] = kzalloc(size, GFP_NOFS);
                if (b == NULL)
                        goto badmem;

                ceph_decode_need(p, end, 4*sizeof(u32), bad);
                b->id = ceph_decode_32(p);
                b->type = ceph_decode_16(p);
                b->alg = ceph_decode_8(p);
                b->hash = ceph_decode_8(p);
                b->weight = ceph_decode_32(p);
                b->size = ceph_decode_32(p);

                dout("crush_decode bucket size %d off %x %p to %p\n",
                     b->size, (int)(*p-start), *p, end);

                b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
                if (b->items == NULL)
                        goto badmem;
                b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
                if (b->perm == NULL)
                        goto badmem;

                ceph_decode_need(p, end, b->size*sizeof(u32), bad);
                for (j = 0; j < b->size; j++)
                        b->items[j] = ceph_decode_32(p);

                switch (b->alg) {
                case CRUSH_BUCKET_UNIFORM:
                        err = crush_decode_uniform_bucket(p, end,
                                  (struct crush_bucket_uniform *)b);
                        if (err < 0)
                                goto bad;
                        break;
                case CRUSH_BUCKET_LIST:
                        err = crush_decode_list_bucket(p, end,
                               (struct crush_bucket_list *)b);
                        if (err < 0)
                                goto bad;
                        break;
                case CRUSH_BUCKET_TREE:
                        err = crush_decode_tree_bucket(p, end,
                                (struct crush_bucket_tree *)b);
                        if (err < 0)
                                goto bad;
                        break;
                case CRUSH_BUCKET_STRAW:
                        err = crush_decode_straw_bucket(p, end,
                                 (struct crush_bucket_straw *)b);
                        if (err < 0)
                                goto bad;
                        break;
                }
        }

        /* rules */
        dout("rule vec is %p\n", c->rules);
        for (i = 0; i < c->max_rules; i++) {
                u32 yes;
                struct crush_rule *r;

                ceph_decode_32_safe(p, end, yes, bad);
                if (!yes) {
                        dout("crush_decode NO rule %d off %x %p to %p\n",
                             i, (int)(*p-start), *p, end);
                        c->rules[i] = NULL;
                        continue;
                }

                dout("crush_decode rule %d off %x %p to %p\n",
                     i, (int)(*p-start), *p, end);

                /* len */
                ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
                err = -EINVAL;
                if (yes > ULONG_MAX / sizeof(struct crush_rule_step))
                        goto bad;
#endif
                r = c->rules[i] = kmalloc(sizeof(*r) +
                                          yes*sizeof(struct crush_rule_step),
                                          GFP_NOFS);
                if (r == NULL)
                        goto badmem;
                dout(" rule %d is at %p\n", i, r);
                r->len = yes;
                ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
                ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
                for (j = 0; j < r->len; j++) {
                        r->steps[j].op = ceph_decode_32(p);
                        r->steps[j].arg1 = ceph_decode_32(p);
                        r->steps[j].arg2 = ceph_decode_32(p);
                }
        }

        /* ignore trailing name maps. */

        dout("crush_decode success\n");
        return c;

badmem:
        err = -ENOMEM;
bad:
        dout("crush_decode fail %d\n", err);
        crush_destroy(c);
        return ERR_PTR(err);
}
/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds)
 */
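
/*
 * Editor's note: a pg_temp entry pins a single pg to an explicit,
 * temporary set of osds (used, for example, while data is backfilled to
 * a newly mapped osd).  calc_pg_raw() below consults this tree first and
 * only falls back to the CRUSH calculation when no entry is found.
 */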
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
static int __insert_pg_mapping(struct ceph_pg_mapping *new,
                               struct rb_root *root)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct ceph_pg_mapping *pg = NULL;
        int c;

        while (*p) {
                parent = *p;
                pg = rb_entry(parent, struct ceph_pg_mapping, node);
                c = pgid_cmp(new->pgid, pg->pgid);
                if (c < 0)
                        p = &(*p)->rb_left;
                else if (c > 0)
                        p = &(*p)->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(&new->node, parent, p);
        rb_insert_color(&new->node, root);
        return 0;
}
static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
                                                   struct ceph_pg pgid)
{
        struct rb_node *n = root->rb_node;
        struct ceph_pg_mapping *pg;
        int c;

        while (n) {
                pg = rb_entry(n, struct ceph_pg_mapping, node);
                c = pgid_cmp(pgid, pg->pgid);
                if (c < 0)
                        n = n->rb_left;
                else if (c > 0)
                        n = n->rb_right;
                else
                        return pg;
        }
        return NULL;
}
/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct ceph_pg_pool_info *pi = NULL;

        while (*p) {
                parent = *p;
                pi = rb_entry(parent, struct ceph_pg_pool_info, node);
                if (new->id < pi->id)
                        p = &(*p)->rb_left;
                else if (new->id > pi->id)
                        p = &(*p)->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(&new->node, parent, p);
        rb_insert_color(&new->node, root);
        return 0;
}
static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
{
        struct ceph_pg_pool_info *pi;
        struct rb_node *n = root->rb_node;

        while (n) {
                pi = rb_entry(n, struct ceph_pg_pool_info, node);
                if (id < pi->id)
                        n = n->rb_left;
                else if (id > pi->id)
                        n = n->rb_right;
                else
                        return pi;
        }
        return NULL;
}
int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
        struct rb_node *rbp;

        for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
                struct ceph_pg_pool_info *pi =
                        rb_entry(rbp, struct ceph_pg_pool_info, node);
                if (pi->name && strcmp(pi->name, name) == 0)
                        return pi->id;
        }
        return -ENOENT;
}
static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
        rb_erase(&pi->node, root);
        kfree(pi->name);
        kfree(pi);
}
static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
        unsigned n, m;

        ceph_decode_copy(p, &pi->v, sizeof(pi->v));
        calc_pg_masks(pi);

        /* num_snaps * snap_info_t */
        n = le32_to_cpu(pi->v.num_snaps);
        while (n--) {
                ceph_decode_need(p, end, sizeof(u64) + 1 + sizeof(u64) +
                                 sizeof(struct ceph_timespec), bad);
                *p += sizeof(u64) +       /* key */
                        1 + sizeof(u64) + /* u8, snapid */
                        sizeof(struct ceph_timespec);
                m = ceph_decode_32(p);    /* snap name */
                *p += m;
        }

        *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
        return 0;

bad:
        return -EINVAL;
}
static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
        struct ceph_pg_pool_info *pi;
        u32 num, len, pool;

        ceph_decode_32_safe(p, end, num, bad);
        dout(" %d pool names\n", num);
        while (num--) {
                ceph_decode_32_safe(p, end, pool, bad);
                ceph_decode_32_safe(p, end, len, bad);
                dout(" pool %d len %d\n", pool, len);
                pi = __lookup_pg_pool(&map->pg_pools, pool);
                if (pi) {
                        kfree(pi->name);
                        pi->name = kmalloc(len + 1, GFP_NOFS);
                        if (pi->name) {
                                memcpy(pi->name, *p, len);
                                pi->name[len] = '\0';
                                dout(" name is %s\n", pi->name);
                        }
                }
                *p += len;
        }
        return 0;

bad:
        return -EINVAL;
}
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
        dout("osdmap_destroy %p\n", map);
        if (map->crush)
                crush_destroy(map->crush);
        while (!RB_EMPTY_ROOT(&map->pg_temp)) {
                struct ceph_pg_mapping *pg =
                        rb_entry(rb_first(&map->pg_temp),
                                 struct ceph_pg_mapping, node);
                rb_erase(&pg->node, &map->pg_temp);
                kfree(pg);
        }
        while (!RB_EMPTY_ROOT(&map->pg_pools)) {
                struct ceph_pg_pool_info *pi =
                        rb_entry(rb_first(&map->pg_pools),
                                 struct ceph_pg_pool_info, node);
                __remove_pg_pool(&map->pg_pools, pi);
        }
        kfree(map->osd_state);
        kfree(map->osd_weight);
        kfree(map->osd_addr);
        kfree(map);
}
/*
 * adjust max osd value.  reallocate arrays.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
        u8 *state;
        struct ceph_entity_addr *addr;
        u32 *weight;

        state = kcalloc(max, sizeof(*state), GFP_NOFS);
        addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
        weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
        if (state == NULL || addr == NULL || weight == NULL) {
                kfree(state);
                kfree(addr);
                kfree(weight);
                return -ENOMEM;
        }

        /* copy old? */
        if (map->osd_state) {
                memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
                memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
                memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
                kfree(map->osd_state);
                kfree(map->osd_addr);
                kfree(map->osd_weight);
        }

        map->osd_state = state;
        map->osd_weight = weight;
        map->osd_addr = addr;
        map->max_osd = max;
        return 0;
}
struct ceph_osdmap *osdmap_decode(void **p, void *end)
{
        struct ceph_osdmap *map;
        u16 version;
        u32 len, max, i;
        u8 ev;
        int err = -EINVAL;
        void *start = *p;
        struct ceph_pg_pool_info *pi;

        dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p));

        map = kzalloc(sizeof(*map), GFP_NOFS);
        if (map == NULL)
                return ERR_PTR(-ENOMEM);
        map->pg_temp = RB_ROOT;

        ceph_decode_16_safe(p, end, version, bad);
        if (version > CEPH_OSDMAP_VERSION) {
                pr_warning("got unknown v %d > %d of osdmap\n", version,
                           CEPH_OSDMAP_VERSION);
                goto bad;
        }

        ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad);
        ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
        map->epoch = ceph_decode_32(p);
        ceph_decode_copy(p, &map->created, sizeof(map->created));
        ceph_decode_copy(p, &map->modified, sizeof(map->modified));

        ceph_decode_32_safe(p, end, max, bad);
        while (max--) {
                ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
                pi = kzalloc(sizeof(*pi), GFP_NOFS);
                if (!pi)
                        goto bad;
                pi->id = ceph_decode_32(p);
                ev = ceph_decode_8(p);  /* encoding version */
                if (ev > CEPH_PG_POOL_VERSION) {
                        pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
                                   ev, CEPH_PG_POOL_VERSION);
                        goto bad;
                }
                err = __decode_pool(p, end, pi);
                if (err < 0)
                        goto bad;
                __insert_pg_pool(&map->pg_pools, pi);
        }

        if (version >= 5 && __decode_pool_names(p, end, map) < 0)
                goto bad;

        ceph_decode_32_safe(p, end, map->pool_max, bad);

        ceph_decode_32_safe(p, end, map->flags, bad);

        max = ceph_decode_32(p);

        /* (re)alloc osd arrays */
        err = osdmap_set_max_osd(map, max);
        if (err < 0)
                goto bad;
        dout("osdmap_decode max_osd = %d\n", map->max_osd);

        /* osd state, weight, addr */
        err = -EINVAL;
        ceph_decode_need(p, end, 3*sizeof(u32) +
                         map->max_osd*(1 + sizeof(*map->osd_weight) +
                                       sizeof(*map->osd_addr)), bad);
        *p += 4; /* skip length field (should match max) */
        ceph_decode_copy(p, map->osd_state, map->max_osd);

        *p += 4; /* skip length field (should match max) */
        for (i = 0; i < map->max_osd; i++)
                map->osd_weight[i] = ceph_decode_32(p);

        *p += 4; /* skip length field (should match max) */
        ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
        for (i = 0; i < map->max_osd; i++)
                ceph_decode_addr(&map->osd_addr[i]);

        /* pg_temp */
        ceph_decode_32_safe(p, end, len, bad);
        for (i = 0; i < len; i++) {
                int n, j;
                struct ceph_pg pgid;
                struct ceph_pg_mapping *pg;

                ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
                ceph_decode_copy(p, &pgid, sizeof(pgid));
                n = ceph_decode_32(p);
                ceph_decode_need(p, end, n * sizeof(u32), bad);
                pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
                if (!pg) {
                        err = -ENOMEM;
                        goto bad;
                }
                pg->pgid = pgid;
                pg->len = n;
                for (j = 0; j < n; j++)
                        pg->osds[j] = ceph_decode_32(p);

                err = __insert_pg_mapping(pg, &map->pg_temp);
                if (err)
                        goto bad;
                dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, len);
        }

        /* crush */
        ceph_decode_32_safe(p, end, len, bad);
        dout("osdmap_decode crush len %d from off 0x%x\n", len,
             (int)(*p - start));
        ceph_decode_need(p, end, len, bad);
        map->crush = crush_decode(*p, end);
        *p += len;
        if (IS_ERR(map->crush)) {
                err = PTR_ERR(map->crush);
                map->crush = NULL;
                goto bad;
        }

        /* ignore the rest of the map */
        *p = end;

        dout("osdmap_decode done %p %p\n", *p, end);
        return map;

bad:
        dout("osdmap_decode fail\n");
        ceph_osdmap_destroy(map);
        return ERR_PTR(err);
}
/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
                                             struct ceph_osdmap *map,
                                             struct ceph_messenger *msgr)
{
        struct crush_map *newcrush = NULL;
        struct ceph_fsid fsid;
        u32 epoch = 0;
        struct ceph_timespec modified;
        u32 len, pool;
        __s32 new_pool_max, new_flags, max;
        void *start = *p;
        int err = -EINVAL;
        u16 version;
        struct rb_node *rbp;

        ceph_decode_16_safe(p, end, version, bad);
        if (version > CEPH_OSDMAP_INC_VERSION) {
                pr_warning("got unknown v %d > %d of inc osdmap\n", version,
                           CEPH_OSDMAP_INC_VERSION);
                goto bad;
        }

        ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32),
                         bad);
        ceph_decode_copy(p, &fsid, sizeof(fsid));
        epoch = ceph_decode_32(p);
        BUG_ON(epoch != map->epoch+1);
        ceph_decode_copy(p, &modified, sizeof(modified));
        new_pool_max = ceph_decode_32(p);
        new_flags = ceph_decode_32(p);

        /* full map? */
        ceph_decode_32_safe(p, end, len, bad);
        if (len > 0) {
                dout("apply_incremental full map len %d, %p to %p\n",
                     len, *p, end);
                return osdmap_decode(p, min(*p+len, end));
        }

        /* new crush? */
        ceph_decode_32_safe(p, end, len, bad);
        if (len > 0) {
                dout("apply_incremental new crush map len %d, %p to %p\n",
                     len, *p, end);
                newcrush = crush_decode(*p, min(*p+len, end));
                if (IS_ERR(newcrush))
                        return ERR_CAST(newcrush);
                *p += len;
        }

        /* new flags? */
        if (new_flags >= 0)
                map->flags = new_flags;
        if (new_pool_max >= 0)
                map->pool_max = new_pool_max;

        ceph_decode_need(p, end, 5*sizeof(u32), bad);

        /* new max? */
        max = ceph_decode_32(p);
        if (max >= 0) {
                err = osdmap_set_max_osd(map, max);
                if (err < 0)
                        goto bad;
        }

        map->epoch++;
        map->modified = modified;
        if (newcrush) {
                crush_destroy(map->crush);
                map->crush = newcrush;
                newcrush = NULL;
        }

        /* new_pool */
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                __u8 ev;
                struct ceph_pg_pool_info *pi;

                ceph_decode_32_safe(p, end, pool, bad);
                ceph_decode_need(p, end, 1 + sizeof(pi->v), bad);
                ev = ceph_decode_8(p);  /* encoding version */
                if (ev > CEPH_PG_POOL_VERSION) {
                        pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
                                   ev, CEPH_PG_POOL_VERSION);
                        goto bad;
                }
                pi = __lookup_pg_pool(&map->pg_pools, pool);
                if (!pi) {
                        pi = kzalloc(sizeof(*pi), GFP_NOFS);
                        if (!pi) {
                                err = -ENOMEM;
                                goto bad;
                        }
                        pi->id = pool;
                        __insert_pg_pool(&map->pg_pools, pi);
                }
                err = __decode_pool(p, end, pi);
                if (err < 0)
                        goto bad;
        }
        if (version >= 5 && __decode_pool_names(p, end, map) < 0)
                goto bad;

        /* old_pool */
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                struct ceph_pg_pool_info *pi;

                ceph_decode_32_safe(p, end, pool, bad);
                pi = __lookup_pg_pool(&map->pg_pools, pool);
                if (pi)
                        __remove_pg_pool(&map->pg_pools, pi);
        }

        /* new_up */
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                u32 osd;
                struct ceph_entity_addr addr;
                ceph_decode_32_safe(p, end, osd, bad);
                ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
                ceph_decode_addr(&addr);
                pr_info("osd%d up\n", osd);
                BUG_ON(osd >= map->max_osd);
                map->osd_state[osd] |= CEPH_OSD_UP;
                map->osd_addr[osd] = addr;
        }

        /* new_down */
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                u32 osd;
                ceph_decode_32_safe(p, end, osd, bad);
                (*p)++;  /* clean flag */
                pr_info("osd%d down\n", osd);
                if (osd < map->max_osd)
                        map->osd_state[osd] &= ~CEPH_OSD_UP;
        }

        /* new_weight */
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                u32 osd, off;
                ceph_decode_need(p, end, sizeof(u32)*2, bad);
                osd = ceph_decode_32(p);
                off = ceph_decode_32(p);
                pr_info("osd%d weight 0x%x %s\n", osd, off,
                        off == CEPH_OSD_IN ? "(in)" :
                        (off == CEPH_OSD_OUT ? "(out)" : ""));
                if (osd < map->max_osd)
                        map->osd_weight[osd] = off;
        }

        /* new_pg_temp */
        rbp = rb_first(&map->pg_temp);
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                struct ceph_pg_mapping *pg;
                int j;
                struct ceph_pg pgid;
                u32 pglen;

                ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
                ceph_decode_copy(p, &pgid, sizeof(pgid));
                pglen = ceph_decode_32(p);

                /* remove any existing entries up to and including this pgid */
                while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping,
                                                node)->pgid, pgid) <= 0) {
                        struct ceph_pg_mapping *cur =
                                rb_entry(rbp, struct ceph_pg_mapping, node);

                        rbp = rb_next(rbp);
                        dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
                        rb_erase(&cur->node, &map->pg_temp);
                        kfree(cur);
                }

                if (pglen) {
                        /* insert */
                        ceph_decode_need(p, end, pglen*sizeof(u32), bad);
                        pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
                        if (!pg) {
                                err = -ENOMEM;
                                goto bad;
                        }
                        pg->pgid = pgid;
                        pg->len = pglen;
                        for (j = 0; j < pglen; j++)
                                pg->osds[j] = ceph_decode_32(p);
                        err = __insert_pg_mapping(pg, &map->pg_temp);
                        if (err)
                                goto bad;
                        dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
                             pglen);
                }
        }
        while (rbp) {
                struct ceph_pg_mapping *cur =
                        rb_entry(rbp, struct ceph_pg_mapping, node);

                rbp = rb_next(rbp);
                dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
                rb_erase(&cur->node, &map->pg_temp);
                kfree(cur);
        }

        /* ignore the rest */
        *p = end;
        return map;

bad:
        pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n",
               epoch, (int)(*p - start), *p, start, end);
        print_hex_dump(KERN_DEBUG, "osdmap: ",
                       DUMP_PREFIX_OFFSET, 16, 1,
                       start, end - start, true);
        if (newcrush)
                crush_destroy(newcrush);
        return ERR_PTR(err);
}
/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * based on file layout.
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
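
/*
 * Worked example with made-up layout values: su = 64K, sc = 3 stripe
 * units per stripe, osize = 256K, so su_per_object = 256K / 64K = 4.
 * For off = 300K:
 *   bl        = 300K / 64K = 4          (stripe unit number)
 *   stripeno  = 4 / 3      = 1
 *   stripepos = 4 % 3      = 1
 *   objsetno  = 1 / 4      = 0
 *   *ono      = 0 * 3 + 1  = 1          (second object of the set)
 *   su_offset = 300K % 64K = 44K, so *oxoff = 44K + (1 % 4) * 64K = 108K
 *   and *oxlen is capped at su - su_offset = 20K (assuming *plen is larger).
 */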
void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
                                   u64 off, u64 *plen,
                                   u64 *ono,
                                   u64 *oxoff, u64 *oxlen)
{
        u32 osize = le32_to_cpu(layout->fl_object_size);
        u32 su = le32_to_cpu(layout->fl_stripe_unit);
        u32 sc = le32_to_cpu(layout->fl_stripe_count);
        u32 bl, stripeno, stripepos, objsetno;
        u32 su_per_object;
        u64 t, su_offset;

        dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen,
             osize, su);
        su_per_object = osize / su;
        dout("osize %u / su %u = su_per_object %u\n", osize, su,
             su_per_object);

        BUG_ON((su & ~PAGE_MASK) != 0);
        /* bl = *off / su; */
        t = off;
        do_div(t, su);
        bl = t;
        dout("off %llu / su %u = bl %u\n", off, su, bl);

        stripeno = bl / sc;
        stripepos = bl % sc;
        objsetno = stripeno / su_per_object;

        *ono = objsetno * sc + stripepos;
        dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned)*ono);

        /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */
        t = off;
        su_offset = do_div(t, su);
        *oxoff = su_offset + (stripeno % su_per_object) * su;

        /*
         * Calculate the length of the extent being written to the selected
         * object. This is the minimum of the full length requested (plen) or
         * the remainder of the current stripe being written to.
         */
        *oxlen = min_t(u64, *plen, su - su_offset);
        *plen = *oxlen;

        dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
}
/*
 * calculate an object layout (i.e. pgid) from an oid,
 * file_layout, and osdmap
 */
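
/*
 * Sketch with hypothetical values: for an object named "foo" in pool 2
 * with no preferred osd, ps = ceph_str_hash() of the name, and the pgid
 * becomes pool 2, preferred -1, seed ps, i.e. the "2.<hash>" form printed
 * by the douts below.  calc_pg_raw() then folds ps down with
 * ceph_stable_mod() and hands it to CRUSH.
 */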
int ceph_calc_object_layout(struct ceph_object_layout *ol,
                            const char *oid,
                            struct ceph_file_layout *fl,
                            struct ceph_osdmap *osdmap)
{
        unsigned num, num_mask;
        struct ceph_pg pgid;
        s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
        int poolid = le32_to_cpu(fl->fl_pg_pool);
        struct ceph_pg_pool_info *pool;
        unsigned ps;

        pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
        if (!pool)
                return -EIO;
        ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
        if (preferred >= 0) {
                ps += preferred;
                num = le32_to_cpu(pool->v.lpg_num);
                num_mask = pool->lpg_num_mask;
        } else {
                num = le32_to_cpu(pool->v.pg_num);
                num_mask = pool->pg_num_mask;
        }

        pgid.ps = cpu_to_le16(ps);
        pgid.preferred = cpu_to_le16(preferred);
        pgid.pool = fl->fl_pg_pool;
        if (preferred >= 0)
                dout("calc_object_layout '%s' pgid %d.%xp%d\n", oid, poolid, ps,
                     (int)preferred);
        else
                dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);

        ol->ol_pgid = pgid;
        ol->ol_stripe_unit = fl->fl_object_stripe_unit;
        return 0;
}
/*
 * Calculate raw osd vector for the given pgid.  Return pointer to osd
 * array, or NULL on failure.
 */
static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
                        int *osds, int *num)
{
        struct ceph_pg_mapping *pg;
        struct ceph_pg_pool_info *pool;
        int ruleno;
        unsigned poolid, ps, pps;
        int preferred;

        /* pg_temp? */
        pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
        if (pg) {
                *num = pg->len;
                return pg->osds;
        }

        /* crush */
        poolid = le32_to_cpu(pgid.pool);
        ps = le16_to_cpu(pgid.ps);
        preferred = (s16)le16_to_cpu(pgid.preferred);

        /* don't forcefeed bad device ids to crush */
        if (preferred >= osdmap->max_osd ||
            preferred >= osdmap->crush->max_devices)
                preferred = -1;

        pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
        if (!pool)
                return NULL;
        ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
                                 pool->v.type, pool->v.size);
        if (ruleno < 0) {
                pr_err("no crush rule pool %d ruleset %d type %d size %d\n",
                       poolid, pool->v.crush_ruleset, pool->v.type,
                       pool->v.size);
                return NULL;
        }

        if (preferred >= 0)
                pps = ceph_stable_mod(ps,
                                      le32_to_cpu(pool->v.lpgp_num),
                                      pool->lpgp_num_mask);
        else
                pps = ceph_stable_mod(ps,
                                      le32_to_cpu(pool->v.pgp_num),
                                      pool->pgp_num_mask);
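
        /*
         * Editor's note (assumption, see ceph_stable_mod() in osdmap.h):
         * the stable mod returns ps & mask when that value is still below
         * the current pgp count, and otherwise folds the top bit away
         * (ps & (mask >> 1)).  E.g. with pgp_num = 12 and mask = 15,
         * ps = 5 stays 5 while ps = 13 becomes 13 & 7 = 5, so existing
         * pgs keep their placement as pgp_num grows toward the next
         * power of two.
         */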
        *num = crush_do_rule(osdmap->crush, ruleno, pps, osds,
                             min_t(int, pool->v.size, *num),
                             preferred, osdmap->osd_weight);
        return osds;
}
/*
 * Return acting set for given pgid.
 */
int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
                        int *acting)
{
        int rawosds[CEPH_PG_MAX_SIZE], *osds;
        int i, o, num = CEPH_PG_MAX_SIZE;

        osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
        if (!osds)
                return -1;

        /* primary is first up osd */
        o = 0;
        for (i = 0; i < num; i++)
                if (ceph_osd_is_up(osdmap, osds[i]))
                        acting[o++] = osds[i];
        return o;
}
/*
 * Return primary osd for given pgid, or -1 if none.
 */
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
{
        int rawosds[CEPH_PG_MAX_SIZE], *osds;
        int i, num = CEPH_PG_MAX_SIZE;

        osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
        if (!osds)
                return -1;

        /* primary is first up osd */
        for (i = 0; i < num; i++)
                if (ceph_osd_is_up(osdmap, osds[i]))
                        return osds[i];
        return -1;
}
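
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a caller that already has a pgid, e.g. from ceph_calc_object_layout(),
 * might do:
 *
 *      int acting[CEPH_PG_MAX_SIZE];
 *      int nr = ceph_calc_pg_acting(osdmap, pgid, acting);
 *      int primary = ceph_calc_pg_primary(osdmap, pgid);
 *
 * nr is the number of currently-up osds copied into acting[], and
 * primary is -1 when none of the pg's raw osds are up.
 */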