[mirror_ubuntu-bionic-kernel.git] / fs/ceph/xattr.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/pagelist.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/decode.h>

#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/slab.h>

#define XATTR_CEPH_PREFIX "ceph."
#define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)

static int __remove_xattr(struct ceph_inode_info *ci,
			  struct ceph_inode_xattr *xattr);

static const struct xattr_handler ceph_other_xattr_handler;

/*
 * List of handlers for synthetic system.* attributes. Other
 * attributes are handled directly.
 */
const struct xattr_handler *ceph_xattr_handlers[] = {
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&ceph_other_xattr_handler,
	NULL,
};

static bool ceph_is_valid_xattr(const char *name)
{
	return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
	       !strncmp(name, XATTR_SECURITY_PREFIX,
			XATTR_SECURITY_PREFIX_LEN) ||
	       !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
	       !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}

/*
 * These define virtual xattrs exposing the recursive directory
 * statistics and layout metadata.
 */
struct ceph_vxattr {
	char *name;
	size_t name_size;	/* strlen(name) + 1 (for '\0') */
	size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
			      size_t size);
	bool readonly, hidden;
	bool (*exists_cb)(struct ceph_inode_info *ci);
};

/* layouts */

static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
{
	struct ceph_file_layout *fl = &ci->i_layout;
	return (fl->stripe_unit > 0 || fl->stripe_count > 0 ||
		fl->object_size > 0 || fl->pool_id >= 0 ||
		rcu_dereference_raw(fl->pool_ns) != NULL);
}

static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
				   size_t size)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_string *pool_ns;
	s64 pool = ci->i_layout.pool_id;
	const char *pool_name;
	const char *ns_field = " pool_namespace=";
	char buf[128];
	size_t len, total_len = 0;
	int ret;

	pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);

	dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
	down_read(&osdc->lock);
	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
	if (pool_name) {
		len = snprintf(buf, sizeof(buf),
			"stripe_unit=%u stripe_count=%u object_size=%u pool=",
			ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
			ci->i_layout.object_size);
		total_len = len + strlen(pool_name);
	} else {
		len = snprintf(buf, sizeof(buf),
			"stripe_unit=%u stripe_count=%u object_size=%u pool=%lld",
			ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
			ci->i_layout.object_size, (unsigned long long)pool);
		total_len = len;
	}

	if (pool_ns)
		total_len += strlen(ns_field) + pool_ns->len;

	if (!size) {
		ret = total_len;
	} else if (total_len > size) {
		ret = -ERANGE;
	} else {
		memcpy(val, buf, len);
		ret = len;
		if (pool_name) {
			len = strlen(pool_name);
			memcpy(val + ret, pool_name, len);
			ret += len;
		}
		if (pool_ns) {
			len = strlen(ns_field);
			memcpy(val + ret, ns_field, len);
			ret += len;
			memcpy(val + ret, pool_ns->str, pool_ns->len);
			ret += pool_ns->len;
		}
	}
	up_read(&osdc->lock);
	ceph_put_string(pool_ns);
	return ret;
}

static size_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci,
					       char *val, size_t size)
{
	return snprintf(val, size, "%u", ci->i_layout.stripe_unit);
}

static size_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci,
						char *val, size_t size)
{
	return snprintf(val, size, "%u", ci->i_layout.stripe_count);
}

static size_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci,
					       char *val, size_t size)
{
	return snprintf(val, size, "%u", ci->i_layout.object_size);
}

static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
					char *val, size_t size)
{
	int ret;
	struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	s64 pool = ci->i_layout.pool_id;
	const char *pool_name;

	down_read(&osdc->lock);
	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
	if (pool_name)
		ret = snprintf(val, size, "%s", pool_name);
	else
		ret = snprintf(val, size, "%lld", (unsigned long long)pool);
	up_read(&osdc->lock);
	return ret;
}

static size_t ceph_vxattrcb_layout_pool_namespace(struct ceph_inode_info *ci,
						  char *val, size_t size)
{
	int ret = 0;
	struct ceph_string *ns = ceph_try_get_string(ci->i_layout.pool_ns);
	if (ns) {
		ret = snprintf(val, size, "%.*s", (int)ns->len, ns->str);
		ceph_put_string(ns);
	}
	return ret;
}

/* directories */

static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
}

static size_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val,
				      size_t size)
{
	return snprintf(val, size, "%lld", ci->i_files);
}

static size_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	return snprintf(val, size, "%lld", ci->i_subdirs);
}

static size_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val,
					 size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val,
				       size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rfiles);
}

static size_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val,
					 size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
				       size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rbytes);
}

static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
				       size_t size)
{
	return snprintf(val, size, "%ld.%09ld", (long)ci->i_rctime.tv_sec,
			(long)ci->i_rctime.tv_nsec);
}

/* quotas */

static bool ceph_vxattrcb_quota_exists(struct ceph_inode_info *ci)
{
	return (ci->i_max_files || ci->i_max_bytes);
}

static size_t ceph_vxattrcb_quota(struct ceph_inode_info *ci, char *val,
				  size_t size)
{
	return snprintf(val, size, "max_bytes=%llu max_files=%llu",
			ci->i_max_bytes, ci->i_max_files);
}

static size_t ceph_vxattrcb_quota_max_bytes(struct ceph_inode_info *ci,
					    char *val, size_t size)
{
	return snprintf(val, size, "%llu", ci->i_max_bytes);
}

static size_t ceph_vxattrcb_quota_max_files(struct ceph_inode_info *ci,
					    char *val, size_t size)
{
	return snprintf(val, size, "%llu", ci->i_max_files);
}

#define CEPH_XATTR_NAME(_type, _name)	XATTR_CEPH_PREFIX #_type "." #_name
#define CEPH_XATTR_NAME2(_type, _name, _name2)	\
	XATTR_CEPH_PREFIX #_type "." #_name "." #_name2

#define XATTR_NAME_CEPH(_type, _name)					\
	{								\
		.name = CEPH_XATTR_NAME(_type, _name),			\
		.name_size = sizeof (CEPH_XATTR_NAME(_type, _name)),	\
		.getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name,	\
		.readonly = true,					\
		.hidden = false,					\
		.exists_cb = NULL,					\
	}
#define XATTR_LAYOUT_FIELD(_type, _name, _field)			\
	{								\
		.name = CEPH_XATTR_NAME2(_type, _name, _field),		\
		.name_size = sizeof (CEPH_XATTR_NAME2(_type, _name, _field)), \
		.getxattr_cb = ceph_vxattrcb_ ## _name ## _ ## _field,	\
		.readonly = false,					\
		.hidden = true,						\
		.exists_cb = ceph_vxattrcb_layout_exists,		\
	}
#define XATTR_QUOTA_FIELD(_type, _name)					\
	{								\
		.name = CEPH_XATTR_NAME(_type, _name),			\
		.name_size = sizeof(CEPH_XATTR_NAME(_type, _name)),	\
		.getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name,	\
		.readonly = false,					\
		.hidden = true,						\
		.exists_cb = ceph_vxattrcb_quota_exists,		\
	}

static struct ceph_vxattr ceph_dir_vxattrs[] = {
	{
		.name = "ceph.dir.layout",
		.name_size = sizeof("ceph.dir.layout"),
		.getxattr_cb = ceph_vxattrcb_layout,
		.readonly = false,
		.hidden = true,
		.exists_cb = ceph_vxattrcb_layout_exists,
	},
	XATTR_LAYOUT_FIELD(dir, layout, stripe_unit),
	XATTR_LAYOUT_FIELD(dir, layout, stripe_count),
	XATTR_LAYOUT_FIELD(dir, layout, object_size),
	XATTR_LAYOUT_FIELD(dir, layout, pool),
	XATTR_LAYOUT_FIELD(dir, layout, pool_namespace),
	XATTR_NAME_CEPH(dir, entries),
	XATTR_NAME_CEPH(dir, files),
	XATTR_NAME_CEPH(dir, subdirs),
	XATTR_NAME_CEPH(dir, rentries),
	XATTR_NAME_CEPH(dir, rfiles),
	XATTR_NAME_CEPH(dir, rsubdirs),
	XATTR_NAME_CEPH(dir, rbytes),
	XATTR_NAME_CEPH(dir, rctime),
	{
		.name = "ceph.quota",
		.name_size = sizeof("ceph.quota"),
		.getxattr_cb = ceph_vxattrcb_quota,
		.readonly = false,
		.hidden = true,
		.exists_cb = ceph_vxattrcb_quota_exists,
	},
	XATTR_QUOTA_FIELD(quota, max_bytes),
	XATTR_QUOTA_FIELD(quota, max_files),
	{ .name = NULL, 0 }	/* Required table terminator */
};
static size_t ceph_dir_vxattrs_name_size;	/* total size of all names */

/* files */

static struct ceph_vxattr ceph_file_vxattrs[] = {
	{
		.name = "ceph.file.layout",
		.name_size = sizeof("ceph.file.layout"),
		.getxattr_cb = ceph_vxattrcb_layout,
		.readonly = false,
		.hidden = true,
		.exists_cb = ceph_vxattrcb_layout_exists,
	},
	XATTR_LAYOUT_FIELD(file, layout, stripe_unit),
	XATTR_LAYOUT_FIELD(file, layout, stripe_count),
	XATTR_LAYOUT_FIELD(file, layout, object_size),
	XATTR_LAYOUT_FIELD(file, layout, pool),
	XATTR_LAYOUT_FIELD(file, layout, pool_namespace),
	{ .name = NULL, 0 }	/* Required table terminator */
};
static size_t ceph_file_vxattrs_name_size;	/* total size of all names */

static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode))
		return ceph_dir_vxattrs;
	else if (S_ISREG(inode->i_mode))
		return ceph_file_vxattrs;
	return NULL;
}

static size_t ceph_vxattrs_name_size(struct ceph_vxattr *vxattrs)
{
	if (vxattrs == ceph_dir_vxattrs)
		return ceph_dir_vxattrs_name_size;
	if (vxattrs == ceph_file_vxattrs)
		return ceph_file_vxattrs_name_size;
	BUG_ON(vxattrs);
	return 0;
}

/*
 * Compute the aggregate size (including terminating '\0') of all
 * virtual extended attribute names in the given vxattr table.
 */
static size_t __init vxattrs_name_size(struct ceph_vxattr *vxattrs)
{
	struct ceph_vxattr *vxattr;
	size_t size = 0;

	for (vxattr = vxattrs; vxattr->name; vxattr++)
		if (!vxattr->hidden)
			size += vxattr->name_size;

	return size;
}

/* Routines called at initialization and exit time */

void __init ceph_xattr_init(void)
{
	ceph_dir_vxattrs_name_size = vxattrs_name_size(ceph_dir_vxattrs);
	ceph_file_vxattrs_name_size = vxattrs_name_size(ceph_file_vxattrs);
}

void ceph_xattr_exit(void)
{
	ceph_dir_vxattrs_name_size = 0;
	ceph_file_vxattrs_name_size = 0;
}

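/*
 * Look up the given name in the vxattr table for this inode type;
 * returns the matching entry, or NULL if it is not a virtual xattr.
 */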
static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
					     const char *name)
{
	struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode);

	if (vxattr) {
		while (vxattr->name) {
			if (!strcmp(vxattr->name, name))
				return vxattr;
			vxattr++;
		}
	}

	return NULL;
}

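/*
 * Insert or update an entry in the per-inode xattr rbtree (keyed by
 * name).  update_xattr is 0 when (re)building the tree from the MDS
 * xattr blob, positive for a local setxattr and negative for a local
 * removexattr; in the update cases this function takes ownership of
 * name, val and *newxattr and frees whatever it does not keep.
 */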
static int __set_xattr(struct ceph_inode_info *ci,
		       const char *name, int name_len,
		       const char *val, int val_len,
		       int flags, int update_xattr,
		       struct ceph_inode_xattr **newxattr)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int c;
	int new = 0;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			if (name_len == xattr->name_len)
				break;
			else if (name_len < xattr->name_len)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
		xattr = NULL;
	}

	if (update_xattr) {
		int err = 0;

		if (xattr && (flags & XATTR_CREATE))
			err = -EEXIST;
		else if (!xattr && (flags & XATTR_REPLACE))
			err = -ENODATA;
		if (err) {
			kfree(name);
			kfree(val);
			kfree(*newxattr);
			return err;
		}
		if (update_xattr < 0) {
			if (xattr)
				__remove_xattr(ci, xattr);
			kfree(name);
			kfree(*newxattr);
			return 0;
		}
	}

	if (!xattr) {
		new = 1;
		xattr = *newxattr;
		xattr->name = name;
		xattr->name_len = name_len;
		xattr->should_free_name = update_xattr;

		ci->i_xattrs.count++;
		dout("__set_xattr count=%d\n", ci->i_xattrs.count);
	} else {
		kfree(*newxattr);
		*newxattr = NULL;
		if (xattr->should_free_val)
			kfree((void *)xattr->val);

		if (update_xattr) {
			kfree((void *)name);
			name = xattr->name;
		}
		ci->i_xattrs.names_size -= xattr->name_len;
		ci->i_xattrs.vals_size -= xattr->val_len;
	}
	ci->i_xattrs.names_size += name_len;
	ci->i_xattrs.vals_size += val_len;
	if (val)
		xattr->val = val;
	else
		xattr->val = "";

	xattr->val_len = val_len;
	xattr->dirty = update_xattr;
	xattr->should_free_val = (val && update_xattr);

	if (new) {
		rb_link_node(&xattr->node, parent, p);
		rb_insert_color(&xattr->node, &ci->i_xattrs.index);
		dout("__set_xattr_val p=%p\n", p);
	}

	dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
	     ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);

	return 0;
}

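/*
 * Find the xattr with the given name in the per-inode rbtree; returns
 * NULL if it is not present.
 */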
static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
					    const char *name)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int name_len = strlen(name);
	int c;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		c = strncmp(name, xattr->name, xattr->name_len);
		if (c == 0 && name_len > xattr->name_len)
			c = 1;
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			dout("__get_xattr %s: found %.*s\n", name,
			     xattr->val_len, xattr->val);
			return xattr;
		}
	}

	dout("__get_xattr %s: not found\n", name);

	return NULL;
}

static void __free_xattr(struct ceph_inode_xattr *xattr)
{
	BUG_ON(!xattr);

	if (xattr->should_free_name)
		kfree((void *)xattr->name);
	if (xattr->should_free_val)
		kfree((void *)xattr->val);

	kfree(xattr);
}

static int __remove_xattr(struct ceph_inode_info *ci,
			  struct ceph_inode_xattr *xattr)
{
	if (!xattr)
		return -ENODATA;

	rb_erase(&xattr->node, &ci->i_xattrs.index);

	if (xattr->should_free_name)
		kfree((void *)xattr->name);
	if (xattr->should_free_val)
		kfree((void *)xattr->val);

	ci->i_xattrs.names_size -= xattr->name_len;
	ci->i_xattrs.vals_size -= xattr->val_len;
	ci->i_xattrs.count--;
	kfree(xattr);

	return 0;
}

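/*
 * Copy all xattr names, each terminated by '\0', into the buffer at
 * dest; returns the position just past the last name copied.
 */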
static char *__copy_xattr_names(struct ceph_inode_info *ci,
				char *dest)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;

	p = rb_first(&ci->i_xattrs.index);
	dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);

	while (p) {
		xattr = rb_entry(p, struct ceph_inode_xattr, node);
		memcpy(dest, xattr->name, xattr->name_len);
		dest[xattr->name_len] = '\0';

		dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
		     xattr->name_len, ci->i_xattrs.names_size);

		dest += xattr->name_len + 1;
		p = rb_next(p);
	}

	return dest;
}

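/*
 * Tear down the whole xattr rbtree for an inode, freeing every node,
 * and reset the cached counters and index version.
 */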
void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
{
	struct rb_node *p, *tmp;
	struct ceph_inode_xattr *xattr = NULL;

	p = rb_first(&ci->i_xattrs.index);

	dout("__ceph_destroy_xattrs p=%p\n", p);

	while (p) {
		xattr = rb_entry(p, struct ceph_inode_xattr, node);
		tmp = p;
		p = rb_next(tmp);
		dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
		     xattr->name_len, xattr->name);
		rb_erase(tmp, &ci->i_xattrs.index);

		__free_xattr(xattr);
	}

	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.index_version = 0;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.index = RB_ROOT;
}

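/*
 * Decode the raw xattr blob received from the MDS into the per-inode
 * rbtree.  i_ceph_lock must be held on entry; it is dropped while the
 * node array is allocated and retaken afterwards, retrying from the
 * top if the xattr version changed in the meantime.
 */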
static int __build_xattrs(struct inode *inode)
	__releases(ci->i_ceph_lock)
	__acquires(ci->i_ceph_lock)
{
	u32 namelen;
	u32 numattr = 0;
	void *p, *end;
	u32 len;
	const char *name, *val;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int xattr_version;
	struct ceph_inode_xattr **xattrs = NULL;
	int err = 0;
	int i;

	dout("__build_xattrs() len=%d\n",
	     ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);

	if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
		return 0; /* already built */

	__ceph_destroy_xattrs(ci);

start:
	/* updated internal xattr rb tree */
	if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
		p = ci->i_xattrs.blob->vec.iov_base;
		end = p + ci->i_xattrs.blob->vec.iov_len;
		ceph_decode_32_safe(&p, end, numattr, bad);
		xattr_version = ci->i_xattrs.version;
		spin_unlock(&ci->i_ceph_lock);

		xattrs = kcalloc(numattr, sizeof(struct ceph_inode_xattr *),
				 GFP_NOFS);
		err = -ENOMEM;
		if (!xattrs)
			goto bad_lock;

		for (i = 0; i < numattr; i++) {
			xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
					    GFP_NOFS);
			if (!xattrs[i])
				goto bad_lock;
		}

		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.version != xattr_version) {
			/* lost a race, retry */
			for (i = 0; i < numattr; i++)
				kfree(xattrs[i]);
			kfree(xattrs);
			xattrs = NULL;
			goto start;
		}
		err = -EIO;
		while (numattr--) {
			ceph_decode_32_safe(&p, end, len, bad);
			namelen = len;
			name = p;
			p += len;
			ceph_decode_32_safe(&p, end, len, bad);
			val = p;
			p += len;

			err = __set_xattr(ci, name, namelen, val, len,
					  0, 0, &xattrs[numattr]);

			if (err < 0)
				goto bad;
		}
		kfree(xattrs);
	}
	ci->i_xattrs.index_version = ci->i_xattrs.version;
	ci->i_xattrs.dirty = false;

	return err;
bad_lock:
	spin_lock(&ci->i_ceph_lock);
bad:
	if (xattrs) {
		for (i = 0; i < numattr; i++)
			kfree(xattrs[i]);
		kfree(xattrs);
	}
	ci->i_xattrs.names_size = 0;
	return err;
}

static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
				    int val_size)
{
	/*
	 * 4 bytes for the length, and additional 4 bytes per each xattr name,
	 * 4 bytes per each value
	 */
	int size = 4 + ci->i_xattrs.count*(4 + 4) +
		ci->i_xattrs.names_size +
		ci->i_xattrs.vals_size;
	dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
	     ci->i_xattrs.count, ci->i_xattrs.names_size,
	     ci->i_xattrs.vals_size);

	if (name_size)
		size += 4 + 4 + name_size + val_size;

	return size;
}

/*
 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
 * and swap into place.
 */
void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;
	void *dest;

	dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
	if (ci->i_xattrs.dirty) {
		int need = __get_required_blob_size(ci, 0, 0);

		BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);

		p = rb_first(&ci->i_xattrs.index);
		dest = ci->i_xattrs.prealloc_blob->vec.iov_base;

		ceph_encode_32(&dest, ci->i_xattrs.count);
		while (p) {
			xattr = rb_entry(p, struct ceph_inode_xattr, node);

			ceph_encode_32(&dest, xattr->name_len);
			memcpy(dest, xattr->name, xattr->name_len);
			dest += xattr->name_len;
			ceph_encode_32(&dest, xattr->val_len);
			memcpy(dest, xattr->val, xattr->val_len);
			dest += xattr->val_len;

			p = rb_next(p);
		}

		/* adjust buffer len; it may be larger than we need */
		ci->i_xattrs.prealloc_blob->vec.iov_len =
			dest - ci->i_xattrs.prealloc_blob->vec.iov_base;

		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
		ci->i_xattrs.prealloc_blob = NULL;
		ci->i_xattrs.dirty = false;
		ci->i_xattrs.version++;
	}
}

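/*
 * If we are in the middle of filling a trace for an MDS request that
 * targets this inode (the request is stashed in current->journal_info),
 * return the getattr/open mask that request asked for, else 0.
 */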
static inline int __get_request_mask(struct inode *in) {
	struct ceph_mds_request *req = current->journal_info;
	int mask = 0;
	if (req && req->r_target_inode == in) {
		if (req->r_op == CEPH_MDS_OP_LOOKUP ||
		    req->r_op == CEPH_MDS_OP_LOOKUPINO ||
		    req->r_op == CEPH_MDS_OP_LOOKUPPARENT ||
		    req->r_op == CEPH_MDS_OP_GETATTR) {
			mask = le32_to_cpu(req->r_args.getattr.mask);
		} else if (req->r_op == CEPH_MDS_OP_OPEN ||
			   req->r_op == CEPH_MDS_OP_CREATE) {
			mask = le32_to_cpu(req->r_args.open.mask);
		}
	}
	return mask;
}

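/*
 * Fetch a single xattr value.  Virtual xattrs are generated locally;
 * everything else comes from the cached xattr blob, refreshed from the
 * MDS first if we do not hold the XATTR_SHARED cap.
 */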
ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
			size_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_xattr *xattr;
	struct ceph_vxattr *vxattr = NULL;
	int req_mask;
	int err;

	/* let's see if a virtual xattr was requested */
	vxattr = ceph_match_vxattr(inode, name);
	if (vxattr) {
		err = ceph_do_getattr(inode, 0, true);
		if (err)
			return err;
		err = -ENODATA;
		if (!(vxattr->exists_cb && !vxattr->exists_cb(ci)))
			err = vxattr->getxattr_cb(ci, value, size);
		return err;
	}

	req_mask = __get_request_mask(inode);

	spin_lock(&ci->i_ceph_lock);
	dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (ci->i_xattrs.version == 0 ||
	    !((req_mask & CEPH_CAP_XATTR_SHARED) ||
	      __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1))) {
		spin_unlock(&ci->i_ceph_lock);

		/* security module gets xattr while filling trace */
		if (current->journal_info) {
			pr_warn_ratelimited("sync getxattr %p "
					    "during filling trace\n", inode);
			return -EBUSY;
		}

		/* get xattrs from mds (if we don't already have them) */
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
		if (err)
			return err;
		spin_lock(&ci->i_ceph_lock);
	}

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

	err = -ENODATA;  /* == ENOATTR */
	xattr = __get_xattr(ci, name);
	if (!xattr)
		goto out;

	err = -ERANGE;
	if (size && size < xattr->val_len)
		goto out;

	err = xattr->val_len;
	if (size == 0)
		goto out;

	memcpy(value, xattr->val, xattr->val_len);

	if (current->journal_info &&
	    !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN))
		ci->i_ceph_flags |= CEPH_I_SEC_INITED;
out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}

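/*
 * List xattr names: regular names from the cached blob first, then any
 * non-hidden virtual xattrs that exist for this inode.  With size == 0
 * only the required buffer length is returned.
 */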
ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
	struct inode *inode = d_inode(dentry);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode);
	u32 vir_namelen = 0;
	u32 namelen;
	int err;
	u32 len;
	int i;

	spin_lock(&ci->i_ceph_lock);
	dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (ci->i_xattrs.version == 0 ||
	    !__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1)) {
		spin_unlock(&ci->i_ceph_lock);
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
		if (err)
			return err;
		spin_lock(&ci->i_ceph_lock);
	}

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;
	/*
	 * Start with virtual dir xattr names (if any) (including
	 * terminating '\0' characters for each).
	 */
	vir_namelen = ceph_vxattrs_name_size(vxattrs);

	/* adding 1 byte per each variable due to the null termination */
	namelen = ci->i_xattrs.names_size + ci->i_xattrs.count;
	err = -ERANGE;
	if (size && vir_namelen + namelen > size)
		goto out;

	err = namelen + vir_namelen;
	if (size == 0)
		goto out;

	names = __copy_xattr_names(ci, names);

	/* virtual xattr names, too */
	err = namelen;
	if (vxattrs) {
		for (i = 0; vxattrs[i].name; i++) {
			if (!vxattrs[i].hidden &&
			    !(vxattrs[i].exists_cb &&
			      !vxattrs[i].exists_cb(ci))) {
				len = sprintf(names, "%s", vxattrs[i].name);
				names += len + 1;
				err += len + 1;
			}
		}
	}

out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}

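/*
 * Synchronously set (or remove) an xattr on the MDS.  A non-empty value
 * is copied into a pagelist that is attached to the request.
 */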
static int ceph_sync_setxattr(struct inode *inode, const char *name,
			      const char *value, size_t size, int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_pagelist *pagelist = NULL;
	int op = CEPH_MDS_OP_SETXATTR;
	int err;

	if (size > 0) {
		/* copy value into pagelist */
		pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
		if (!pagelist)
			return -ENOMEM;

		ceph_pagelist_init(pagelist);
		err = ceph_pagelist_append(pagelist, value, size);
		if (err)
			goto out;
	} else if (!value) {
		if (flags & CEPH_XATTR_REPLACE)
			op = CEPH_MDS_OP_RMXATTR;
		else
			flags |= CEPH_XATTR_REMOVE;
	}

	dout("setxattr value=%.*s\n", (int)size, value);

	/* do request */
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_path2 = kstrdup(name, GFP_NOFS);
	if (!req->r_path2) {
		ceph_mdsc_put_request(req);
		err = -ENOMEM;
		goto out;
	}

	if (op == CEPH_MDS_OP_SETXATTR) {
		req->r_args.setxattr.flags = cpu_to_le32(flags);
		req->r_pagelist = pagelist;
		pagelist = NULL;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_inode_drop = CEPH_CAP_XATTR_SHARED;

	dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);

out:
	if (pagelist)
		ceph_pagelist_release(pagelist);
	return err;
}

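/*
 * Set an xattr.  If we hold the XATTR_EXCL cap the change is applied to
 * the local cache and flushed with the dirty caps later; otherwise (and
 * for unhandled ceph.* names) fall back to a synchronous MDS request.
 */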
int __ceph_setxattr(struct inode *inode, const char *name,
		    const void *value, size_t size, int flags)
{
	struct ceph_vxattr *vxattr;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_cap_flush *prealloc_cf = NULL;
	int issued;
	int err;
	int dirty = 0;
	int name_len = strlen(name);
	int val_len = size;
	char *newname = NULL;
	char *newval = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int required_blob_size;
	bool lock_snap_rwsem = false;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	vxattr = ceph_match_vxattr(inode, name);
	if (vxattr && vxattr->readonly)
		return -EOPNOTSUPP;

	/* pass any unhandled ceph.* xattrs through to the MDS */
	if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
		goto do_sync_unlocked;

	/* preallocate memory for xattr name, value, index node */
	err = -ENOMEM;
	newname = kmemdup(name, name_len + 1, GFP_NOFS);
	if (!newname)
		goto out;

	if (val_len) {
		newval = kmemdup(value, val_len, GFP_NOFS);
		if (!newval)
			goto out;
	}

	xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
	if (!xattr)
		goto out;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		goto out;

	spin_lock(&ci->i_ceph_lock);
retry:
	issued = __ceph_caps_issued(ci, NULL);
	if (ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))
		goto do_sync;

	if (!lock_snap_rwsem && !ci->i_head_snapc) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);
			goto retry;
		}
	}

	dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
	__build_xattrs(inode);

	required_blob_size = __get_required_blob_size(ci, name_len, val_len);

	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob;

		spin_unlock(&ci->i_ceph_lock);
		dout(" preallocating new blob size=%d\n", required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
		if (!blob)
			goto do_sync_unlocked;
		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.prealloc_blob)
			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
		ci->i_xattrs.prealloc_blob = blob;
		goto retry;
	}

	err = __set_xattr(ci, newname, name_len, newval, val_len,
			  flags, value ? 1 : -1, &xattr);

	if (!err) {
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL,
					       &prealloc_cf);
		ci->i_xattrs.dirty = true;
		inode->i_ctime = current_time(inode);
	}

	spin_unlock(&ci->i_ceph_lock);
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	ceph_free_cap_flush(prealloc_cf);
	return err;

do_sync:
	spin_unlock(&ci->i_ceph_lock);
do_sync_unlocked:
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);

	/* security module set xattr while filling trace */
	if (current->journal_info) {
		pr_warn_ratelimited("sync setxattr %p "
				    "during filling trace\n", inode);
		err = -EBUSY;
	} else {
		err = ceph_sync_setxattr(inode, name, value, size, flags);
	}
out:
	ceph_free_cap_flush(prealloc_cf);
	kfree(newname);
	kfree(newval);
	kfree(xattr);
	return err;
}

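/*
 * Generic xattr_handler get/set entry points; names outside the
 * recognized ceph./security./trusted./user. prefixes are rejected.
 */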
static int ceph_get_xattr_handler(const struct xattr_handler *handler,
				  struct dentry *dentry, struct inode *inode,
				  const char *name, void *value, size_t size)
{
	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;
	return __ceph_getxattr(inode, name, value, size);
}

static int ceph_set_xattr_handler(const struct xattr_handler *handler,
				  struct dentry *unused, struct inode *inode,
				  const char *name, const void *value,
				  size_t size, int flags)
{
	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;
	return __ceph_setxattr(inode, name, value, size, flags);
}

static const struct xattr_handler ceph_other_xattr_handler = {
	.prefix = "",  /* match any name => handlers called with full name */
	.get = ceph_get_xattr_handler,
	.set = ceph_set_xattr_handler,
};

#ifdef CONFIG_SECURITY
bool ceph_security_xattr_wanted(struct inode *in)
{
	return in->i_security != NULL;
}

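/*
 * Report whether fetching a security xattr for this inode would have to
 * go synchronously to the MDS (security blob present, security xattr not
 * yet initialized, and no cached xattrs covered by the SHARED cap), which
 * is not safe while an MDS reply trace is being filled.
 */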
bool ceph_security_xattr_deadlock(struct inode *in)
{
	struct ceph_inode_info *ci;
	bool ret;
	if (!in->i_security)
		return false;
	ci = ceph_inode(in);
	spin_lock(&ci->i_ceph_lock);
	ret = !(ci->i_ceph_flags & CEPH_I_SEC_INITED) &&
	      !(ci->i_xattrs.version > 0 &&
		__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0));
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}
#endif