#include <linux/ceph/ceph_debug.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/decode.h>

#include <linux/xattr.h>
#include <linux/slab.h>

#define XATTR_CEPH_PREFIX "ceph."
#define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)
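/*
 * Only xattr names in the "ceph.", "security.", "trusted." and "user."
 * namespaces are accepted; anything else is rejected up front, before we
 * touch the local cache or talk to the MDS.
 */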
static bool ceph_is_valid_xattr(const char *name)
{
        return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
               !strncmp(name, XATTR_SECURITY_PREFIX,
                        XATTR_SECURITY_PREFIX_LEN) ||
               !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
               !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}
/*
 * These define virtual xattrs exposing the recursive directory
 * statistics and layout metadata.
 */
struct ceph_vxattr {
        char *name;
        size_t name_size;       /* strlen(name) + 1 (for '\0') */
        size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
                              size_t size);
        bool readonly, hidden;
        bool (*exists_cb)(struct ceph_inode_info *ci);
};
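/*
 * getxattr_cb formats the value into the caller's buffer and returns its
 * length.  A readonly vxattr rejects setxattr/removexattr, a hidden one is
 * skipped by listxattr, and when exists_cb is present and returns false the
 * vxattr is treated as if it were absent.
 */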
static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
{
        size_t s;
        char *p = (char *)&ci->i_layout;

        for (s = 0; s < sizeof(ci->i_layout); s++, p++)
                if (*p)
                        return true;
        return false;
}
static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
                                   size_t size)
{
        int ret;
        struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        s64 pool = ceph_file_layout_pg_pool(ci->i_layout);
        const char *pool_name;

        dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
        down_read(&osdc->map_sem);
        pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
        if (pool_name)
                ret = snprintf(val, size,
                "stripe_unit=%lld stripe_count=%lld object_size=%lld pool=%s",
                (unsigned long long)ceph_file_layout_su(ci->i_layout),
                (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
                (unsigned long long)ceph_file_layout_object_size(ci->i_layout),
                pool_name);
        else
                ret = snprintf(val, size,
                "stripe_unit=%lld stripe_count=%lld object_size=%lld pool=%lld",
                (unsigned long long)ceph_file_layout_su(ci->i_layout),
                (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
                (unsigned long long)ceph_file_layout_object_size(ci->i_layout),
                (unsigned long long)pool);

        up_read(&osdc->map_sem);

        return ret;
}
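/*
 * Example output (illustrative values only), e.g. from
 * "getfattr -n ceph.file.layout <file>":
 *
 *   stripe_unit=4194304 stripe_count=1 object_size=4194304 pool=data
 *
 * If the pool has no name in the current osdmap, the numeric pool id is
 * printed instead.
 */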
static size_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci,
                                               char *val, size_t size)
{
        return snprintf(val, size, "%lld",
                        (unsigned long long)ceph_file_layout_su(ci->i_layout));
}

static size_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci,
                                                char *val, size_t size)
{
        return snprintf(val, size, "%lld",
                        (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout));
}

static size_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci,
                                               char *val, size_t size)
{
        return snprintf(val, size, "%lld",
                        (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
}
static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
                                        char *val, size_t size)
{
        int ret;
        struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        s64 pool = ceph_file_layout_pg_pool(ci->i_layout);
        const char *pool_name;

        down_read(&osdc->map_sem);
        pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
        if (pool_name)
                ret = snprintf(val, size, "%s", pool_name);
        else
                ret = snprintf(val, size, "%lld", (unsigned long long)pool);
        up_read(&osdc->map_sem);

        return ret;
}
static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
                                        size_t size)
{
        return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
}

static size_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val,
                                      size_t size)
{
        return snprintf(val, size, "%lld", ci->i_files);
}

static size_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val,
                                        size_t size)
{
        return snprintf(val, size, "%lld", ci->i_subdirs);
}

static size_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val,
                                         size_t size)
{
        return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val,
                                       size_t size)
{
        return snprintf(val, size, "%lld", ci->i_rfiles);
}

static size_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val,
                                         size_t size)
{
        return snprintf(val, size, "%lld", ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
                                       size_t size)
{
        return snprintf(val, size, "%lld", ci->i_rbytes);
}
static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
                                       size_t size)
{
        return snprintf(val, size, "%ld.%09ld", (long)ci->i_rctime.tv_sec,
                        (long)ci->i_rctime.tv_nsec);
}
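/*
 * The macros below generate the vxattr table entries for the callbacks
 * above; CEPH_XATTR_NAME(dir, entries) expands to "ceph.dir.entries" and
 * CEPH_XATTR_NAME2(dir, layout, pool) to "ceph.dir.layout.pool".
 */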
#define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name
#define CEPH_XATTR_NAME2(_type, _name, _name2) \
        XATTR_CEPH_PREFIX #_type "." #_name "." #_name2

#define XATTR_NAME_CEPH(_type, _name) \
        { \
                .name = CEPH_XATTR_NAME(_type, _name), \
                .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
                .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
                .readonly = true, \
                .hidden = false, \
                .exists_cb = NULL, \
        }
#define XATTR_LAYOUT_FIELD(_type, _name, _field) \
        { \
                .name = CEPH_XATTR_NAME2(_type, _name, _field), \
                .name_size = sizeof (CEPH_XATTR_NAME2(_type, _name, _field)), \
                .getxattr_cb = ceph_vxattrcb_ ## _name ## _ ## _field, \
                .readonly = false, \
                .hidden = true, \
                .exists_cb = ceph_vxattrcb_layout_exists, \
        }
static struct ceph_vxattr ceph_dir_vxattrs[] = {
        {
                .name = "ceph.dir.layout",
                .name_size = sizeof("ceph.dir.layout"),
                .getxattr_cb = ceph_vxattrcb_layout,
                .readonly = false,
                .hidden = false,
                .exists_cb = ceph_vxattrcb_layout_exists,
        },
        XATTR_LAYOUT_FIELD(dir, layout, stripe_unit),
        XATTR_LAYOUT_FIELD(dir, layout, stripe_count),
        XATTR_LAYOUT_FIELD(dir, layout, object_size),
        XATTR_LAYOUT_FIELD(dir, layout, pool),
        XATTR_NAME_CEPH(dir, entries),
        XATTR_NAME_CEPH(dir, files),
        XATTR_NAME_CEPH(dir, subdirs),
        XATTR_NAME_CEPH(dir, rentries),
        XATTR_NAME_CEPH(dir, rfiles),
        XATTR_NAME_CEPH(dir, rsubdirs),
        XATTR_NAME_CEPH(dir, rbytes),
        XATTR_NAME_CEPH(dir, rctime),
        { 0 }   /* Required table terminator */
};
static size_t ceph_dir_vxattrs_name_size;       /* total size of all names */
static struct ceph_vxattr ceph_file_vxattrs[] = {
        {
                .name = "ceph.file.layout",
                .name_size = sizeof("ceph.file.layout"),
                .getxattr_cb = ceph_vxattrcb_layout,
                .readonly = false,
                .hidden = false,
                .exists_cb = ceph_vxattrcb_layout_exists,
        },
        XATTR_LAYOUT_FIELD(file, layout, stripe_unit),
        XATTR_LAYOUT_FIELD(file, layout, stripe_count),
        XATTR_LAYOUT_FIELD(file, layout, object_size),
        XATTR_LAYOUT_FIELD(file, layout, pool),
        { 0 }   /* Required table terminator */
};
static size_t ceph_file_vxattrs_name_size;      /* total size of all names */
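/*
 * From userspace these appear as ordinary extended attributes, e.g.
 * (illustrative mount point and value):
 *
 *   $ getfattr -n ceph.dir.rbytes /mnt/ceph/mydir
 *   # file: mnt/ceph/mydir
 *   ceph.dir.rbytes="1048576"
 */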
static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode)
{
        if (S_ISDIR(inode->i_mode))
                return ceph_dir_vxattrs;
        else if (S_ISREG(inode->i_mode))
                return ceph_file_vxattrs;
        return NULL;
}
static size_t ceph_vxattrs_name_size(struct ceph_vxattr *vxattrs)
{
        if (vxattrs == ceph_dir_vxattrs)
                return ceph_dir_vxattrs_name_size;
        if (vxattrs == ceph_file_vxattrs)
                return ceph_file_vxattrs_name_size;
        BUG();

        return 0;
}
/*
 * Compute the aggregate size (including terminating '\0') of all
 * virtual extended attribute names in the given vxattr table.
 */
static size_t __init vxattrs_name_size(struct ceph_vxattr *vxattrs)
{
        struct ceph_vxattr *vxattr;
        size_t size = 0;

        for (vxattr = vxattrs; vxattr->name; vxattr++)
                if (!vxattr->hidden)
                        size += vxattr->name_size;

        return size;
}
/* Routines called at initialization and exit time */

void __init ceph_xattr_init(void)
{
        ceph_dir_vxattrs_name_size = vxattrs_name_size(ceph_dir_vxattrs);
        ceph_file_vxattrs_name_size = vxattrs_name_size(ceph_file_vxattrs);
}

void ceph_xattr_exit(void)
{
        ceph_dir_vxattrs_name_size = 0;
        ceph_file_vxattrs_name_size = 0;
}
static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
                                             const char *name)
{
        struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode);

        if (vxattr) {
                while (vxattr->name) {
                        if (!strcmp(vxattr->name, name))
                                return vxattr;
                        vxattr++;
                }
        }

        return NULL;
}
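/*
 * Regular (non-virtual) xattrs are cached per inode in an rb-tree
 * (ci->i_xattrs.index) keyed by name.  The tree is rebuilt lazily from the
 * raw xattr blob received from the MDS and is re-encoded into a
 * preallocated blob when dirty entries need to be written back.
 */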
static int __set_xattr(struct ceph_inode_info *ci,
                       const char *name, int name_len,
                       const char *val, int val_len,
                       int dirty,
                       int should_free_name, int should_free_val,
                       struct ceph_inode_xattr **newxattr)
{
        struct rb_node *parent = NULL;
        struct ceph_inode_xattr *xattr = NULL;

        p = &ci->i_xattrs.index.rb_node;

                xattr = rb_entry(parent, struct ceph_inode_xattr, node);
                c = strncmp(name, xattr->name, min(name_len, xattr->name_len));

                        if (name_len == xattr->name_len)
                        else if (name_len < xattr->name_len)

                xattr->name_len = name_len;
                xattr->should_free_name = should_free_name;

                ci->i_xattrs.count++;
                dout("__set_xattr count=%d\n", ci->i_xattrs.count);

                if (xattr->should_free_val)
                        kfree((void *)xattr->val);

                if (should_free_name) {

                ci->i_xattrs.names_size -= xattr->name_len;
                ci->i_xattrs.vals_size -= xattr->val_len;

        ci->i_xattrs.names_size += name_len;
        ci->i_xattrs.vals_size += val_len;

        xattr->val_len = val_len;
        xattr->dirty = dirty;
        xattr->should_free_val = (val && should_free_val);

                rb_link_node(&xattr->node, parent, p);
                rb_insert_color(&xattr->node, &ci->i_xattrs.index);
                dout("__set_xattr_val p=%p\n", p);

        dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
             ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);
static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
                                            const char *name)
{
        struct rb_node *parent = NULL;
        struct ceph_inode_xattr *xattr = NULL;
        int name_len = strlen(name);

        p = &ci->i_xattrs.index.rb_node;

                xattr = rb_entry(parent, struct ceph_inode_xattr, node);
                c = strncmp(name, xattr->name, xattr->name_len);
                if (c == 0 && name_len > xattr->name_len)

                        dout("__get_xattr %s: found %.*s\n", name,
                             xattr->val_len, xattr->val);

        dout("__get_xattr %s: not found\n", name);
static void __free_xattr(struct ceph_inode_xattr *xattr)
{
        if (xattr->should_free_name)
                kfree((void *)xattr->name);
        if (xattr->should_free_val)
                kfree((void *)xattr->val);

        kfree(xattr);
}
static int __remove_xattr(struct ceph_inode_info *ci,
                          struct ceph_inode_xattr *xattr)
{
        rb_erase(&xattr->node, &ci->i_xattrs.index);

        if (xattr->should_free_name)
                kfree((void *)xattr->name);
        if (xattr->should_free_val)
                kfree((void *)xattr->val);

        ci->i_xattrs.names_size -= xattr->name_len;
        ci->i_xattrs.vals_size -= xattr->val_len;
        ci->i_xattrs.count--;
static int __remove_xattr_by_name(struct ceph_inode_info *ci,
                                  const char *name)
{
        struct rb_node **p;
        struct ceph_inode_xattr *xattr;
        int err;

        p = &ci->i_xattrs.index.rb_node;
        xattr = __get_xattr(ci, name);
        err = __remove_xattr(ci, xattr);
        return err;
}
static char *__copy_xattr_names(struct ceph_inode_info *ci,
                                char *dest)
{
        struct ceph_inode_xattr *xattr = NULL;

        p = rb_first(&ci->i_xattrs.index);
        dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);

                xattr = rb_entry(p, struct ceph_inode_xattr, node);
                memcpy(dest, xattr->name, xattr->name_len);
                dest[xattr->name_len] = '\0';

                dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
                     xattr->name_len, ci->i_xattrs.names_size);

                dest += xattr->name_len + 1;
void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
{
        struct rb_node *p, *tmp;
        struct ceph_inode_xattr *xattr = NULL;

        p = rb_first(&ci->i_xattrs.index);

        dout("__ceph_destroy_xattrs p=%p\n", p);

                xattr = rb_entry(p, struct ceph_inode_xattr, node);

                dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
                     xattr->name_len, xattr->name);
                rb_erase(tmp, &ci->i_xattrs.index);

        ci->i_xattrs.names_size = 0;
        ci->i_xattrs.vals_size = 0;
        ci->i_xattrs.index_version = 0;
        ci->i_xattrs.count = 0;
        ci->i_xattrs.index = RB_ROOT;
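/*
 * __build_xattrs() decodes the raw xattr blob received from the MDS into
 * the per-inode rb-tree whenever the cached index (index_version) is older
 * than the blob (version).  It drops i_ceph_lock around its allocations
 * and retries if the xattr version changed in the meantime.
 */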
static int __build_xattrs(struct inode *inode)
        __releases(ci->i_ceph_lock)
        __acquires(ci->i_ceph_lock)
{
        const char *name, *val;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_xattr **xattrs = NULL;

        dout("__build_xattrs() len=%d\n",
             ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);

        if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
                return 0; /* already built */

        __ceph_destroy_xattrs(ci);
        /* update the internal xattr rb tree */
        if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
                p = ci->i_xattrs.blob->vec.iov_base;
                end = p + ci->i_xattrs.blob->vec.iov_len;
                ceph_decode_32_safe(&p, end, numattr, bad);
                xattr_version = ci->i_xattrs.version;
                spin_unlock(&ci->i_ceph_lock);

                xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
                                 GFP_NOFS);

                memset(xattrs, 0, numattr*sizeof(struct ceph_xattr *));
                for (i = 0; i < numattr; i++) {
                        xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
                                            GFP_NOFS);

                spin_lock(&ci->i_ceph_lock);
                if (ci->i_xattrs.version != xattr_version) {
                        /* lost a race, retry */
                        for (i = 0; i < numattr; i++)

                        ceph_decode_32_safe(&p, end, len, bad);

                        ceph_decode_32_safe(&p, end, len, bad);

                        err = __set_xattr(ci, name, namelen, val, len,
                                          0, 0, 0, &xattrs[numattr]);

        ci->i_xattrs.index_version = ci->i_xattrs.version;
        ci->i_xattrs.dirty = false;

        spin_lock(&ci->i_ceph_lock);

                for (i = 0; i < numattr; i++)

        ci->i_xattrs.names_size = 0;
static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
                                    int val_size)
{
        /*
         * 4 bytes for the count, plus 4 bytes of name length and 4 bytes of
         * value length for each xattr, plus the name and value bytes
         * themselves
         */
        int size = 4 + ci->i_xattrs.count*(4 + 4) +
                ci->i_xattrs.names_size +
                ci->i_xattrs.vals_size;
        dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
             ci->i_xattrs.count, ci->i_xattrs.names_size,
             ci->i_xattrs.vals_size);

        if (name_size)
                size += 4 + 4 + name_size + val_size;

        return size;
}
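/*
 * For example (illustrative numbers): two cached xattrs whose names total
 * 16 bytes and whose values total 8 bytes need 4 + 2*(4+4) + 16 + 8 = 44
 * bytes; adding a new 9-byte name with a 5-byte value adds another
 * 4 + 4 + 9 + 5 = 22 bytes, for a 66-byte blob.
 */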
/*
 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
 * and swap into place.
 */
void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
        struct ceph_inode_xattr *xattr = NULL;

        dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
        if (ci->i_xattrs.dirty) {
                int need = __get_required_blob_size(ci, 0, 0);

                BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);

                p = rb_first(&ci->i_xattrs.index);
                dest = ci->i_xattrs.prealloc_blob->vec.iov_base;

                ceph_encode_32(&dest, ci->i_xattrs.count);

                        xattr = rb_entry(p, struct ceph_inode_xattr, node);

                        ceph_encode_32(&dest, xattr->name_len);
                        memcpy(dest, xattr->name, xattr->name_len);
                        dest += xattr->name_len;
                        ceph_encode_32(&dest, xattr->val_len);
                        memcpy(dest, xattr->val, xattr->val_len);
                        dest += xattr->val_len;

                /* adjust buffer len; it may be larger than we need */
                ci->i_xattrs.prealloc_blob->vec.iov_len =
                        dest - ci->i_xattrs.prealloc_blob->vec.iov_base;

                if (ci->i_xattrs.blob)
                        ceph_buffer_put(ci->i_xattrs.blob);
                ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
                ci->i_xattrs.prealloc_blob = NULL;
                ci->i_xattrs.dirty = false;
                ci->i_xattrs.version++;
        }
}
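/*
 * The resulting blob layout is a __le32 xattr count followed by
 * (__le32 name_len, name bytes, __le32 val_len, value bytes) for each
 * xattr.  __build_xattrs() above decodes exactly this format.
 */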
ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
                      size_t size)
{
        struct inode *inode = dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_xattr *xattr;
        struct ceph_vxattr *vxattr = NULL;

        if (!ceph_is_valid_xattr(name))

        spin_lock(&ci->i_ceph_lock);
        dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
             ci->i_xattrs.version, ci->i_xattrs.index_version);

        /* let's see if a virtual xattr was requested */
        vxattr = ceph_match_vxattr(inode, name);
        if (vxattr && !(vxattr->exists_cb && !vxattr->exists_cb(ci))) {
                err = vxattr->getxattr_cb(ci, value, size);

        if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
            (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {

                spin_unlock(&ci->i_ceph_lock);
                /* get xattrs from mds (if we don't already have them) */
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);

        spin_lock(&ci->i_ceph_lock);

        err = __build_xattrs(inode);

        err = -ENODATA;  /* == ENOATTR */
        xattr = __get_xattr(ci, name);

        if (size && size < xattr->val_len)

        err = xattr->val_len;

        memcpy(value, xattr->val, xattr->val_len);

        spin_unlock(&ci->i_ceph_lock);
        return err;
}
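/*
 * getxattr thus prefers a matching virtual xattr, then the cached rb-tree
 * if we hold CEPH_CAP_XATTR_SHARED and the index is current, and otherwise
 * fetches fresh xattrs from the MDS before looking the name up.  A zero
 * size only probes for the required buffer length.
 */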
ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
        struct inode *inode = dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode);

        spin_lock(&ci->i_ceph_lock);
        dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
             ci->i_xattrs.version, ci->i_xattrs.index_version);

        if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
            (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {

                spin_unlock(&ci->i_ceph_lock);
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);

        spin_lock(&ci->i_ceph_lock);

        err = __build_xattrs(inode);

        /*
         * Start with virtual dir xattr names (if any) (including
         * terminating '\0' characters for each).
         */
        vir_namelen = ceph_vxattrs_name_size(vxattrs);

        /* plus 1 byte per cached name for its terminating '\0' */
        namelen = ci->i_xattrs.names_size + ci->i_xattrs.count;

        if (size && vir_namelen + namelen > size)

        err = namelen + vir_namelen;

        names = __copy_xattr_names(ci, names);

        /* virtual xattr names, too */
                for (i = 0; vxattrs[i].name; i++) {
                        if (!vxattrs[i].hidden &&
                            !(vxattrs[i].exists_cb &&
                              !vxattrs[i].exists_cb(ci))) {
                                len = sprintf(names, "%s", vxattrs[i].name);

        spin_unlock(&ci->i_ceph_lock);
        return err;
}
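/*
 * As with getxattr, a zero size just returns the buffer length needed for
 * all names: the cached names plus any visible (non-hidden, existing)
 * virtual xattr names, each with its trailing '\0'.
 */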
static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
                              const char *value, size_t size, int flags)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
        struct inode *inode = dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct inode *parent_inode;
        struct ceph_mds_request *req;
        struct ceph_mds_client *mdsc = fsc->mdsc;

        struct page **pages = NULL;

        /* copy value into some pages */
        nr_pages = calc_pages_for(0, size);

                pages = kmalloc(sizeof(pages[0])*nr_pages, GFP_NOFS);

                for (i = 0; i < nr_pages; i++) {
                        pages[i] = __page_cache_alloc(GFP_NOFS);

                        kaddr = kmap(pages[i]);
                        memcpy(kaddr, value + i*PAGE_CACHE_SIZE,
                               min(PAGE_CACHE_SIZE, size-i*PAGE_CACHE_SIZE));

        dout("setxattr value=%.*s\n", (int)size, value);

        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,

        req->r_inode = inode;

        req->r_inode_drop = CEPH_CAP_XATTR_SHARED;

        req->r_args.setxattr.flags = cpu_to_le32(flags);
        req->r_path2 = kstrdup(name, GFP_NOFS);

        req->r_pages = pages;
        req->r_num_pages = nr_pages;
        req->r_data_len = size;

        dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
        parent_inode = ceph_get_dentry_parent_inode(dentry);
        err = ceph_mdsc_do_request(mdsc, parent_inode, req);

        ceph_mdsc_put_request(req);
        dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);

        for (i = 0; i < nr_pages; i++)
                __free_page(pages[i]);
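/*
 * The new value travels as request data: it is copied into page-sized
 * chunks and attached via r_pages/r_data_len, while r_inode_drop gives up
 * CEPH_CAP_XATTR_SHARED so the locally cached xattrs are invalidated and
 * refetched from the MDS.
 */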
int ceph_setxattr(struct dentry *dentry, const char *name,
                  const void *value, size_t size, int flags)
{
        struct inode *inode = dentry->d_inode;
        struct ceph_vxattr *vxattr;
        struct ceph_inode_info *ci = ceph_inode(inode);

        int name_len = strlen(name);

        char *newname = NULL;

        struct ceph_inode_xattr *xattr = NULL;
        int required_blob_size;

        if (ceph_snap(inode) != CEPH_NOSNAP)

        if (!ceph_is_valid_xattr(name))

        vxattr = ceph_match_vxattr(inode, name);
        if (vxattr && vxattr->readonly)

        /* pass any unhandled ceph.* xattrs through to the MDS */
        if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
                goto do_sync_unlocked;

        /* preallocate memory for xattr name, value, index node */

        newname = kmemdup(name, name_len + 1, GFP_NOFS);

                newval = kmemdup(value, val_len, GFP_NOFS);

        xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);

        spin_lock(&ci->i_ceph_lock);

        issued = __ceph_caps_issued(ci, NULL);
        dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
        if (!(issued & CEPH_CAP_XATTR_EXCL))

        __build_xattrs(inode);

        required_blob_size = __get_required_blob_size(ci, name_len, val_len);

        if (!ci->i_xattrs.prealloc_blob ||
            required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
                struct ceph_buffer *blob;

                spin_unlock(&ci->i_ceph_lock);
                dout(" preallocating new blob size=%d\n", required_blob_size);
                blob = ceph_buffer_new(required_blob_size, GFP_NOFS);

                spin_lock(&ci->i_ceph_lock);
                if (ci->i_xattrs.prealloc_blob)
                        ceph_buffer_put(ci->i_xattrs.prealloc_blob);
                ci->i_xattrs.prealloc_blob = blob;
        }

        err = __set_xattr(ci, newname, name_len, newval,
                          val_len, 1, 1, 1, &xattr);

        dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
        ci->i_xattrs.dirty = true;
        inode->i_ctime = CURRENT_TIME;

        spin_unlock(&ci->i_ceph_lock);

        __mark_inode_dirty(inode, dirty);

        spin_unlock(&ci->i_ceph_lock);
        err = ceph_sync_setxattr(dentry, name, value, size, flags);
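/*
 * ceph_setxattr() therefore has two paths: if we hold CEPH_CAP_XATTR_EXCL
 * the xattr is updated in the local cache, XATTR_EXCL is marked dirty and
 * ctime is bumped, and the change is written back later via the encoded
 * blob; otherwise (or for unhandled ceph.* names) the change is sent
 * synchronously to the MDS.
 */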
static int ceph_send_removexattr(struct dentry *dentry, const char *name)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct inode *inode = dentry->d_inode;
        struct inode *parent_inode;
        struct ceph_mds_request *req;

        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR,

        req->r_inode = inode;

        req->r_inode_drop = CEPH_CAP_XATTR_SHARED;

        req->r_path2 = kstrdup(name, GFP_NOFS);

        parent_inode = ceph_get_dentry_parent_inode(dentry);
        err = ceph_mdsc_do_request(mdsc, parent_inode, req);

        ceph_mdsc_put_request(req);
        return err;
}
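/*
 * Like the sync setxattr path, removal is a plain MDS request: the name to
 * drop rides in r_path2, and r_inode_drop again gives up
 * CEPH_CAP_XATTR_SHARED so stale cached xattrs are not trusted afterwards.
 */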
int ceph_removexattr(struct dentry *dentry, const char *name)
{
        struct inode *inode = dentry->d_inode;
        struct ceph_vxattr *vxattr;
        struct ceph_inode_info *ci = ceph_inode(inode);

        int required_blob_size;

        if (ceph_snap(inode) != CEPH_NOSNAP)

        if (!ceph_is_valid_xattr(name))

        vxattr = ceph_match_vxattr(inode, name);
        if (vxattr && vxattr->readonly)

        /* pass any unhandled ceph.* xattrs through to the MDS */
        if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
                goto do_sync_unlocked;

        spin_lock(&ci->i_ceph_lock);

        issued = __ceph_caps_issued(ci, NULL);
        dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));

        if (!(issued & CEPH_CAP_XATTR_EXCL))

        __build_xattrs(inode);

        required_blob_size = __get_required_blob_size(ci, 0, 0);

        if (!ci->i_xattrs.prealloc_blob ||
            required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
                struct ceph_buffer *blob;

                spin_unlock(&ci->i_ceph_lock);
                dout(" preallocating new blob size=%d\n", required_blob_size);
                blob = ceph_buffer_new(required_blob_size, GFP_NOFS);

                spin_lock(&ci->i_ceph_lock);
                if (ci->i_xattrs.prealloc_blob)
                        ceph_buffer_put(ci->i_xattrs.prealloc_blob);
                ci->i_xattrs.prealloc_blob = blob;
        }

        err = __remove_xattr_by_name(ceph_inode(inode), name);

        dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
        ci->i_xattrs.dirty = true;
        inode->i_ctime = CURRENT_TIME;
        spin_unlock(&ci->i_ceph_lock);

        __mark_inode_dirty(inode, dirty);

        spin_unlock(&ci->i_ceph_lock);
        err = ceph_send_removexattr(dentry, name);
        return err;
}