fs/ceph/xattr.c
1 #include <linux/ceph/ceph_debug.h>
2 #include <linux/ceph/pagelist.h>
3
4 #include "super.h"
5 #include "mds_client.h"
6
7 #include <linux/ceph/decode.h>
8
9 #include <linux/xattr.h>
10 #include <linux/posix_acl_xattr.h>
11 #include <linux/slab.h>
12
13 #define XATTR_CEPH_PREFIX "ceph."
14 #define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)
15
16 static int __remove_xattr(struct ceph_inode_info *ci,
17 struct ceph_inode_xattr *xattr);
18
19 static const struct xattr_handler ceph_other_xattr_handler;
20
21 /*
22 * List of handlers for synthetic system.* attributes. Other
23 * attributes are handled directly.
24 */
25 const struct xattr_handler *ceph_xattr_handlers[] = {
26 #ifdef CONFIG_CEPH_FS_POSIX_ACL
27 &posix_acl_access_xattr_handler,
28 &posix_acl_default_xattr_handler,
29 #endif
30 &ceph_other_xattr_handler,
31 NULL,
32 };
33
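/*
 * Only names in the "ceph.", "security.", "trusted." and "user."
 * namespaces are accepted; anything else is rejected with -EOPNOTSUPP
 * by the get/set handlers further down.
 */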
34 static bool ceph_is_valid_xattr(const char *name)
35 {
36 return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
37 !strncmp(name, XATTR_SECURITY_PREFIX,
38 XATTR_SECURITY_PREFIX_LEN) ||
39 !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
40 !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
41 }
42
43 /*
44 * These define virtual xattrs exposing the recursive directory
45 * statistics and layout metadata.
46 */
47 struct ceph_vxattr {
48 char *name;
49 size_t name_size; /* strlen(name) + 1 (for '\0') */
50 size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
51 size_t size);
52 bool readonly, hidden;
53 bool (*exists_cb)(struct ceph_inode_info *ci);
54 };
55
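/*
 * Illustrative only: from userspace these vxattrs read like ordinary
 * extended attributes, e.g. (paths and values assumed)
 *
 *   $ getfattr -n ceph.dir.rbytes /mnt/cephfs/somedir
 *   # file: mnt/cephfs/somedir
 *   ceph.dir.rbytes="1234567"
 */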
56 /* layouts */
57
58 static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
59 {
60 struct ceph_file_layout *fl = &ci->i_layout;
61 return (fl->stripe_unit > 0 || fl->stripe_count > 0 ||
62 fl->object_size > 0 || fl->pool_id >= 0 ||
63 rcu_dereference_raw(fl->pool_ns) != NULL);
64 }
65
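/*
 * The value assembled below looks like (illustrative, numbers assumed):
 *   "stripe_unit=4194304 stripe_count=1 object_size=4194304 pool=rbd"
 * with " pool_namespace=<ns>" appended when a namespace is set, and the
 * numeric pool id substituted when the pool name cannot be resolved.
 */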
66 static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
67 size_t size)
68 {
69 struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
70 struct ceph_osd_client *osdc = &fsc->client->osdc;
71 struct ceph_string *pool_ns;
72 s64 pool = ci->i_layout.pool_id;
73 const char *pool_name;
74 const char *ns_field = " pool_namespace=";
75 char buf[128];
76 size_t len, total_len = 0;
77 int ret;
78
79 pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
80
81 dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
82 down_read(&osdc->lock);
83 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
84 if (pool_name) {
85 len = snprintf(buf, sizeof(buf),
86 "stripe_unit=%u stripe_count=%u object_size=%u pool=",
87 ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
88 ci->i_layout.object_size);
89 total_len = len + strlen(pool_name);
90 } else {
91 len = snprintf(buf, sizeof(buf),
92 "stripe_unit=%u stripe_count=%u object_size=%u pool=%lld",
93 ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
94 ci->i_layout.object_size, (long long)pool);
95 total_len = len;
96 }
97
98 if (pool_ns)
99 total_len += strlen(ns_field) + pool_ns->len;
100
101 if (!size) {
102 ret = total_len;
103 } else if (total_len > size) {
104 ret = -ERANGE;
105 } else {
106 memcpy(val, buf, len);
107 ret = len;
108 if (pool_name) {
109 len = strlen(pool_name);
110 memcpy(val + ret, pool_name, len);
111 ret += len;
112 }
113 if (pool_ns) {
114 len = strlen(ns_field);
115 memcpy(val + ret, ns_field, len);
116 ret += len;
117 memcpy(val + ret, pool_ns->str, pool_ns->len);
118 ret += pool_ns->len;
119 }
120 }
121 up_read(&osdc->lock);
122 ceph_put_string(pool_ns);
123 return ret;
124 }
125
126 static size_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci,
127 char *val, size_t size)
128 {
129 return snprintf(val, size, "%u", ci->i_layout.stripe_unit);
130 }
131
132 static size_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci,
133 char *val, size_t size)
134 {
135 return snprintf(val, size, "%u", ci->i_layout.stripe_count);
136 }
137
138 static size_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci,
139 char *val, size_t size)
140 {
141 return snprintf(val, size, "%u", ci->i_layout.object_size);
142 }
143
144 static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
145 char *val, size_t size)
146 {
147 int ret;
148 struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
149 struct ceph_osd_client *osdc = &fsc->client->osdc;
150 s64 pool = ci->i_layout.pool_id;
151 const char *pool_name;
152
153 down_read(&osdc->lock);
154 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
155 if (pool_name)
156 ret = snprintf(val, size, "%s", pool_name);
157 else
158 ret = snprintf(val, size, "%lld", (long long)pool);
159 up_read(&osdc->lock);
160 return ret;
161 }
162
163 static size_t ceph_vxattrcb_layout_pool_namespace(struct ceph_inode_info *ci,
164 char *val, size_t size)
165 {
166 int ret = 0;
167 struct ceph_string *ns = ceph_try_get_string(ci->i_layout.pool_ns);
168 if (ns) {
169 ret = snprintf(val, size, "%.*s", (int)ns->len, ns->str);
170 ceph_put_string(ns);
171 }
172 return ret;
173 }
174
175 /* directories */
176
177 static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
178 size_t size)
179 {
180 return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
181 }
182
183 static size_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val,
184 size_t size)
185 {
186 return snprintf(val, size, "%lld", ci->i_files);
187 }
188
189 static size_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val,
190 size_t size)
191 {
192 return snprintf(val, size, "%lld", ci->i_subdirs);
193 }
194
195 static size_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val,
196 size_t size)
197 {
198 return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
199 }
200
201 static size_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val,
202 size_t size)
203 {
204 return snprintf(val, size, "%lld", ci->i_rfiles);
205 }
206
207 static size_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val,
208 size_t size)
209 {
210 return snprintf(val, size, "%lld", ci->i_rsubdirs);
211 }
212
213 static size_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
214 size_t size)
215 {
216 return snprintf(val, size, "%lld", ci->i_rbytes);
217 }
218
219 static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
220 size_t size)
221 {
222 return snprintf(val, size, "%ld.%09ld", (long)ci->i_rctime.tv_sec,
223 (long)ci->i_rctime.tv_nsec);
224 }
225
226
227 #define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name
228 #define CEPH_XATTR_NAME2(_type, _name, _name2) \
229 XATTR_CEPH_PREFIX #_type "." #_name "." #_name2
230
231 #define XATTR_NAME_CEPH(_type, _name) \
232 { \
233 .name = CEPH_XATTR_NAME(_type, _name), \
234 .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
235 .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
236 .readonly = true, \
237 .hidden = false, \
238 .exists_cb = NULL, \
239 }
240 #define XATTR_LAYOUT_FIELD(_type, _name, _field) \
241 { \
242 .name = CEPH_XATTR_NAME2(_type, _name, _field), \
243 .name_size = sizeof (CEPH_XATTR_NAME2(_type, _name, _field)), \
244 .getxattr_cb = ceph_vxattrcb_ ## _name ## _ ## _field, \
245 .readonly = false, \
246 .hidden = true, \
247 .exists_cb = ceph_vxattrcb_layout_exists, \
248 }
249
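/*
 * Macro expansion sketch: XATTR_NAME_CEPH(dir, rbytes) yields an entry
 * named "ceph.dir.rbytes" backed by ceph_vxattrcb_dir_rbytes(), while
 * XATTR_LAYOUT_FIELD(dir, layout, pool) yields "ceph.dir.layout.pool"
 * backed by ceph_vxattrcb_layout_pool().
 */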
250 static struct ceph_vxattr ceph_dir_vxattrs[] = {
251 {
252 .name = "ceph.dir.layout",
253 .name_size = sizeof("ceph.dir.layout"),
254 .getxattr_cb = ceph_vxattrcb_layout,
255 .readonly = false,
256 .hidden = true,
257 .exists_cb = ceph_vxattrcb_layout_exists,
258 },
259 XATTR_LAYOUT_FIELD(dir, layout, stripe_unit),
260 XATTR_LAYOUT_FIELD(dir, layout, stripe_count),
261 XATTR_LAYOUT_FIELD(dir, layout, object_size),
262 XATTR_LAYOUT_FIELD(dir, layout, pool),
263 XATTR_LAYOUT_FIELD(dir, layout, pool_namespace),
264 XATTR_NAME_CEPH(dir, entries),
265 XATTR_NAME_CEPH(dir, files),
266 XATTR_NAME_CEPH(dir, subdirs),
267 XATTR_NAME_CEPH(dir, rentries),
268 XATTR_NAME_CEPH(dir, rfiles),
269 XATTR_NAME_CEPH(dir, rsubdirs),
270 XATTR_NAME_CEPH(dir, rbytes),
271 XATTR_NAME_CEPH(dir, rctime),
272 { .name = NULL, 0 } /* Required table terminator */
273 };
274 static size_t ceph_dir_vxattrs_name_size; /* total size of all names */
275
276 /* files */
277
278 static struct ceph_vxattr ceph_file_vxattrs[] = {
279 {
280 .name = "ceph.file.layout",
281 .name_size = sizeof("ceph.file.layout"),
282 .getxattr_cb = ceph_vxattrcb_layout,
283 .readonly = false,
284 .hidden = true,
285 .exists_cb = ceph_vxattrcb_layout_exists,
286 },
287 XATTR_LAYOUT_FIELD(file, layout, stripe_unit),
288 XATTR_LAYOUT_FIELD(file, layout, stripe_count),
289 XATTR_LAYOUT_FIELD(file, layout, object_size),
290 XATTR_LAYOUT_FIELD(file, layout, pool),
291 XATTR_LAYOUT_FIELD(file, layout, pool_namespace),
292 { .name = NULL, 0 } /* Required table terminator */
293 };
294 static size_t ceph_file_vxattrs_name_size; /* total size of all names */
295
296 static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode)
297 {
298 if (S_ISDIR(inode->i_mode))
299 return ceph_dir_vxattrs;
300 else if (S_ISREG(inode->i_mode))
301 return ceph_file_vxattrs;
302 return NULL;
303 }
304
305 static size_t ceph_vxattrs_name_size(struct ceph_vxattr *vxattrs)
306 {
307 if (vxattrs == ceph_dir_vxattrs)
308 return ceph_dir_vxattrs_name_size;
309 if (vxattrs == ceph_file_vxattrs)
310 return ceph_file_vxattrs_name_size;
311 BUG_ON(vxattrs);
312 return 0;
313 }
314
315 /*
316 * Compute the aggregate size (including terminating '\0') of all
317 * non-hidden virtual extended attribute names in the given vxattr table.
318 */
319 static size_t __init vxattrs_name_size(struct ceph_vxattr *vxattrs)
320 {
321 struct ceph_vxattr *vxattr;
322 size_t size = 0;
323
324 for (vxattr = vxattrs; vxattr->name; vxattr++)
325 if (!vxattr->hidden)
326 size += vxattr->name_size;
327
328 return size;
329 }
330
331 /* Routines called at initialization and exit time */
332
333 void __init ceph_xattr_init(void)
334 {
335 ceph_dir_vxattrs_name_size = vxattrs_name_size(ceph_dir_vxattrs);
336 ceph_file_vxattrs_name_size = vxattrs_name_size(ceph_file_vxattrs);
337 }
338
339 void ceph_xattr_exit(void)
340 {
341 ceph_dir_vxattrs_name_size = 0;
342 ceph_file_vxattrs_name_size = 0;
343 }
344
345 static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
346 const char *name)
347 {
348 struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode);
349
350 if (vxattr) {
351 while (vxattr->name) {
352 if (!strcmp(vxattr->name, name))
353 return vxattr;
354 vxattr++;
355 }
356 }
357
358 return NULL;
359 }
360
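/*
 * Insert or update an entry in the per-inode xattr rb-tree, which is
 * keyed by name (with length as tie-break).  *newxattr is a preallocated
 * node, consumed on insert and freed otherwise.  update_xattr > 0 sets,
 * update_xattr < 0 removes, and 0 means the tree is being rebuilt from
 * the MDS blob (names and values then point into that blob, not copies).
 */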
361 static int __set_xattr(struct ceph_inode_info *ci,
362 const char *name, int name_len,
363 const char *val, int val_len,
364 int flags, int update_xattr,
365 struct ceph_inode_xattr **newxattr)
366 {
367 struct rb_node **p;
368 struct rb_node *parent = NULL;
369 struct ceph_inode_xattr *xattr = NULL;
370 int c;
371 int new = 0;
372
373 p = &ci->i_xattrs.index.rb_node;
374 while (*p) {
375 parent = *p;
376 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
377 c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
378 if (c < 0)
379 p = &(*p)->rb_left;
380 else if (c > 0)
381 p = &(*p)->rb_right;
382 else {
383 if (name_len == xattr->name_len)
384 break;
385 else if (name_len < xattr->name_len)
386 p = &(*p)->rb_left;
387 else
388 p = &(*p)->rb_right;
389 }
390 xattr = NULL;
391 }
392
393 if (update_xattr) {
394 int err = 0;
395
396 if (xattr && (flags & XATTR_CREATE))
397 err = -EEXIST;
398 else if (!xattr && (flags & XATTR_REPLACE))
399 err = -ENODATA;
400 if (err) {
401 kfree(name);
402 kfree(val);
403 kfree(*newxattr);
404 return err;
405 }
406 if (update_xattr < 0) {
407 if (xattr)
408 __remove_xattr(ci, xattr);
409 kfree(name);
410 kfree(*newxattr);
411 return 0;
412 }
413 }
414
415 if (!xattr) {
416 new = 1;
417 xattr = *newxattr;
418 xattr->name = name;
419 xattr->name_len = name_len;
420 xattr->should_free_name = update_xattr;
421
422 ci->i_xattrs.count++;
423 dout("__set_xattr count=%d\n", ci->i_xattrs.count);
424 } else {
425 kfree(*newxattr);
426 *newxattr = NULL;
427 if (xattr->should_free_val)
428 kfree((void *)xattr->val);
429
430 if (update_xattr) {
431 kfree((void *)name);
432 name = xattr->name;
433 }
434 ci->i_xattrs.names_size -= xattr->name_len;
435 ci->i_xattrs.vals_size -= xattr->val_len;
436 }
437 ci->i_xattrs.names_size += name_len;
438 ci->i_xattrs.vals_size += val_len;
439 if (val)
440 xattr->val = val;
441 else
442 xattr->val = "";
443
444 xattr->val_len = val_len;
445 xattr->dirty = update_xattr;
446 xattr->should_free_val = (val && update_xattr);
447
448 if (new) {
449 rb_link_node(&xattr->node, parent, p);
450 rb_insert_color(&xattr->node, &ci->i_xattrs.index);
451 dout("__set_xattr_val p=%p\n", p);
452 }
453
454 dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
455 ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);
456
457 return 0;
458 }
459
460 static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
461 const char *name)
462 {
463 struct rb_node **p;
464 struct rb_node *parent = NULL;
465 struct ceph_inode_xattr *xattr = NULL;
466 int name_len = strlen(name);
467 int c;
468
469 p = &ci->i_xattrs.index.rb_node;
470 while (*p) {
471 parent = *p;
472 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
473 c = strncmp(name, xattr->name, xattr->name_len);
474 if (c == 0 && name_len > xattr->name_len)
475 c = 1;
476 if (c < 0)
477 p = &(*p)->rb_left;
478 else if (c > 0)
479 p = &(*p)->rb_right;
480 else {
481 dout("__get_xattr %s: found %.*s\n", name,
482 xattr->val_len, xattr->val);
483 return xattr;
484 }
485 }
486
487 dout("__get_xattr %s: not found\n", name);
488
489 return NULL;
490 }
491
492 static void __free_xattr(struct ceph_inode_xattr *xattr)
493 {
494 BUG_ON(!xattr);
495
496 if (xattr->should_free_name)
497 kfree((void *)xattr->name);
498 if (xattr->should_free_val)
499 kfree((void *)xattr->val);
500
501 kfree(xattr);
502 }
503
504 static int __remove_xattr(struct ceph_inode_info *ci,
505 struct ceph_inode_xattr *xattr)
506 {
507 if (!xattr)
508 return -ENODATA;
509
510 rb_erase(&xattr->node, &ci->i_xattrs.index);
511
512 if (xattr->should_free_name)
513 kfree((void *)xattr->name);
514 if (xattr->should_free_val)
515 kfree((void *)xattr->val);
516
517 ci->i_xattrs.names_size -= xattr->name_len;
518 ci->i_xattrs.vals_size -= xattr->val_len;
519 ci->i_xattrs.count--;
520 kfree(xattr);
521
522 return 0;
523 }
524
525 static char *__copy_xattr_names(struct ceph_inode_info *ci,
526 char *dest)
527 {
528 struct rb_node *p;
529 struct ceph_inode_xattr *xattr = NULL;
530
531 p = rb_first(&ci->i_xattrs.index);
532 dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);
533
534 while (p) {
535 xattr = rb_entry(p, struct ceph_inode_xattr, node);
536 memcpy(dest, xattr->name, xattr->name_len);
537 dest[xattr->name_len] = '\0';
538
539 dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
540 xattr->name_len, ci->i_xattrs.names_size);
541
542 dest += xattr->name_len + 1;
543 p = rb_next(p);
544 }
545
546 return dest;
547 }
548
549 void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
550 {
551 struct rb_node *p, *tmp;
552 struct ceph_inode_xattr *xattr = NULL;
553
554 p = rb_first(&ci->i_xattrs.index);
555
556 dout("__ceph_destroy_xattrs p=%p\n", p);
557
558 while (p) {
559 xattr = rb_entry(p, struct ceph_inode_xattr, node);
560 tmp = p;
561 p = rb_next(tmp);
562 dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
563 xattr->name_len, xattr->name);
564 rb_erase(tmp, &ci->i_xattrs.index);
565
566 __free_xattr(xattr);
567 }
568
569 ci->i_xattrs.names_size = 0;
570 ci->i_xattrs.vals_size = 0;
571 ci->i_xattrs.index_version = 0;
572 ci->i_xattrs.count = 0;
573 ci->i_xattrs.index = RB_ROOT;
574 }
575
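/*
 * Decode the raw xattr blob received from the MDS into the in-memory
 * rb-tree.  i_ceph_lock is dropped while the nodes are allocated, so the
 * xattr version is re-checked afterwards and the whole pass is restarted
 * if it changed in the meantime.
 */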
576 static int __build_xattrs(struct inode *inode)
577 __releases(ci->i_ceph_lock)
578 __acquires(ci->i_ceph_lock)
579 {
580 u32 namelen;
581 u32 numattr = 0;
582 void *p, *end;
583 u32 len;
584 const char *name, *val;
585 struct ceph_inode_info *ci = ceph_inode(inode);
586 int xattr_version;
587 struct ceph_inode_xattr **xattrs = NULL;
588 int err = 0;
589 int i;
590
591 dout("__build_xattrs() len=%d\n",
592 ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
593
594 if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
595 return 0; /* already built */
596
597 __ceph_destroy_xattrs(ci);
598
599 start:
600 /* update the internal xattr rb-tree */
601 if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
602 p = ci->i_xattrs.blob->vec.iov_base;
603 end = p + ci->i_xattrs.blob->vec.iov_len;
604 ceph_decode_32_safe(&p, end, numattr, bad);
605 xattr_version = ci->i_xattrs.version;
606 spin_unlock(&ci->i_ceph_lock);
607
608 xattrs = kcalloc(numattr, sizeof(struct ceph_inode_xattr *),
609 GFP_NOFS);
610 err = -ENOMEM;
611 if (!xattrs)
612 goto bad_lock;
613
614 for (i = 0; i < numattr; i++) {
615 xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
616 GFP_NOFS);
617 if (!xattrs[i])
618 goto bad_lock;
619 }
620
621 spin_lock(&ci->i_ceph_lock);
622 if (ci->i_xattrs.version != xattr_version) {
623 /* lost a race, retry */
624 for (i = 0; i < numattr; i++)
625 kfree(xattrs[i]);
626 kfree(xattrs);
627 xattrs = NULL;
628 goto start;
629 }
630 err = -EIO;
631 while (numattr--) {
632 ceph_decode_32_safe(&p, end, len, bad);
633 namelen = len;
634 name = p;
635 p += len;
636 ceph_decode_32_safe(&p, end, len, bad);
637 val = p;
638 p += len;
639
640 err = __set_xattr(ci, name, namelen, val, len,
641 0, 0, &xattrs[numattr]);
642
643 if (err < 0)
644 goto bad;
645 }
646 kfree(xattrs);
647 }
648 ci->i_xattrs.index_version = ci->i_xattrs.version;
649 ci->i_xattrs.dirty = false;
650
651 return err;
652 bad_lock:
653 spin_lock(&ci->i_ceph_lock);
654 bad:
655 if (xattrs) {
656 for (i = 0; i < numattr; i++)
657 kfree(xattrs[i]);
658 kfree(xattrs);
659 }
660 ci->i_xattrs.names_size = 0;
661 return err;
662 }
663
664 static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
665 int val_size)
666 {
667 /*
668 * 4 bytes for the xattr count, plus 4 bytes for each xattr name
669 * length and 4 bytes for each value length
670 */
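/*
 * Worked example (sizes assumed): with two cached xattrs whose names
 * are "user.a" and "user.b" (6 bytes each) and whose values are 3 and
 * 5 bytes, the existing payload needs
 *   4 + 2*(4 + 4) + 12 + 8 = 40 bytes,
 * plus 4 + 4 + name_size + val_size for the xattr being added.
 */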
671 int size = 4 + ci->i_xattrs.count*(4 + 4) +
672 ci->i_xattrs.names_size +
673 ci->i_xattrs.vals_size;
674 dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
675 ci->i_xattrs.count, ci->i_xattrs.names_size,
676 ci->i_xattrs.vals_size);
677
678 if (name_size)
679 size += 4 + 4 + name_size + val_size;
680
681 return size;
682 }
683
684 /*
685 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
686 * and swap into place.
687 */
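/*
 * The encoded layout is: le32 xattr count, then for each xattr a le32
 * name length, the name bytes, a le32 value length and the value bytes,
 * with no padding and no NUL terminators.
 */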
688 void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
689 {
690 struct rb_node *p;
691 struct ceph_inode_xattr *xattr = NULL;
692 void *dest;
693
694 dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
695 if (ci->i_xattrs.dirty) {
696 int need = __get_required_blob_size(ci, 0, 0);
697
698 BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);
699
700 p = rb_first(&ci->i_xattrs.index);
701 dest = ci->i_xattrs.prealloc_blob->vec.iov_base;
702
703 ceph_encode_32(&dest, ci->i_xattrs.count);
704 while (p) {
705 xattr = rb_entry(p, struct ceph_inode_xattr, node);
706
707 ceph_encode_32(&dest, xattr->name_len);
708 memcpy(dest, xattr->name, xattr->name_len);
709 dest += xattr->name_len;
710 ceph_encode_32(&dest, xattr->val_len);
711 memcpy(dest, xattr->val, xattr->val_len);
712 dest += xattr->val_len;
713
714 p = rb_next(p);
715 }
716
717 /* adjust buffer len; it may be larger than we need */
718 ci->i_xattrs.prealloc_blob->vec.iov_len =
719 dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
720
721 if (ci->i_xattrs.blob)
722 ceph_buffer_put(ci->i_xattrs.blob);
723 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
724 ci->i_xattrs.prealloc_blob = NULL;
725 ci->i_xattrs.dirty = false;
726 ci->i_xattrs.version++;
727 }
728 }
729
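/*
 * If we are being called from the reply path of an MDS request for this
 * inode (current->journal_info holds the request), return the attribute
 * mask that request asked for, so the caller can tell whether the xattr
 * data carried in the trace is sufficient.
 */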
730 static inline int __get_request_mask(struct inode *in) {
731 struct ceph_mds_request *req = current->journal_info;
732 int mask = 0;
733 if (req && req->r_target_inode == in) {
734 if (req->r_op == CEPH_MDS_OP_LOOKUP ||
735 req->r_op == CEPH_MDS_OP_LOOKUPINO ||
736 req->r_op == CEPH_MDS_OP_LOOKUPPARENT ||
737 req->r_op == CEPH_MDS_OP_GETATTR) {
738 mask = le32_to_cpu(req->r_args.getattr.mask);
739 } else if (req->r_op == CEPH_MDS_OP_OPEN ||
740 req->r_op == CEPH_MDS_OP_CREATE) {
741 mask = le32_to_cpu(req->r_args.open.mask);
742 }
743 }
744 return mask;
745 }
746
747 ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
748 size_t size)
749 {
750 struct ceph_inode_info *ci = ceph_inode(inode);
751 struct ceph_inode_xattr *xattr;
752 struct ceph_vxattr *vxattr = NULL;
753 int req_mask;
754 int err;
755
756 /* let's see if a virtual xattr was requested */
757 vxattr = ceph_match_vxattr(inode, name);
758 if (vxattr) {
759 err = ceph_do_getattr(inode, 0, true);
760 if (err)
761 return err;
762 err = -ENODATA;
763 if (!(vxattr->exists_cb && !vxattr->exists_cb(ci)))
764 err = vxattr->getxattr_cb(ci, value, size);
765 return err;
766 }
767
768 req_mask = __get_request_mask(inode);
769
770 spin_lock(&ci->i_ceph_lock);
771 dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
772 ci->i_xattrs.version, ci->i_xattrs.index_version);
773
774 if (ci->i_xattrs.version == 0 ||
775 !((req_mask & CEPH_CAP_XATTR_SHARED) ||
776 __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1))) {
777 spin_unlock(&ci->i_ceph_lock);
778
779 /* security module gets xattr while filling trace */
780 if (current->journal_info != NULL) {
781 pr_warn_ratelimited("sync getxattr %p "
782 "during filling trace\n", inode);
783 return -EBUSY;
784 }
785
786 /* get xattrs from mds (if we don't already have them) */
787 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
788 if (err)
789 return err;
790 spin_lock(&ci->i_ceph_lock);
791 }
792
793 err = __build_xattrs(inode);
794 if (err < 0)
795 goto out;
796
797 err = -ENODATA; /* == ENOATTR */
798 xattr = __get_xattr(ci, name);
799 if (!xattr)
800 goto out;
801
802 err = -ERANGE;
803 if (size && size < xattr->val_len)
804 goto out;
805
806 err = xattr->val_len;
807 if (size == 0)
808 goto out;
809
810 memcpy(value, xattr->val, xattr->val_len);
811
812 if (current->journal_info != NULL &&
813 !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN))
814 ci->i_ceph_flags |= CEPH_I_SEC_INITED;
815 out:
816 spin_unlock(&ci->i_ceph_lock);
817 return err;
818 }
819
820 ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
821 {
822 struct inode *inode = d_inode(dentry);
823 struct ceph_inode_info *ci = ceph_inode(inode);
824 struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode);
825 u32 vir_namelen = 0;
826 u32 namelen;
827 int err;
828 u32 len;
829 int i;
830
831 spin_lock(&ci->i_ceph_lock);
832 dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
833 ci->i_xattrs.version, ci->i_xattrs.index_version);
834
835 if (ci->i_xattrs.version == 0 ||
836 !__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1)) {
837 spin_unlock(&ci->i_ceph_lock);
838 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
839 if (err)
840 return err;
841 spin_lock(&ci->i_ceph_lock);
842 }
843
844 err = __build_xattrs(inode);
845 if (err < 0)
846 goto out;
847 /*
848 * Start with the virtual xattr names (if any), including the
849 * terminating '\0' for each.
850 */
851 vir_namelen = ceph_vxattrs_name_size(vxattrs);
852
853 /* add one byte per name for the terminating '\0' */
854 namelen = ci->i_xattrs.names_size + ci->i_xattrs.count;
855 err = -ERANGE;
856 if (size && vir_namelen + namelen > size)
857 goto out;
858
859 err = namelen + vir_namelen;
860 if (size == 0)
861 goto out;
862
863 names = __copy_xattr_names(ci, names);
864
865 /* virtual xattr names, too */
866 err = namelen;
867 if (vxattrs) {
868 for (i = 0; vxattrs[i].name; i++) {
869 if (!vxattrs[i].hidden &&
870 !(vxattrs[i].exists_cb &&
871 !vxattrs[i].exists_cb(ci))) {
872 len = sprintf(names, "%s", vxattrs[i].name);
873 names += len + 1;
874 err += len + 1;
875 }
876 }
877 }
878
879 out:
880 spin_unlock(&ci->i_ceph_lock);
881 return err;
882 }
883
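/*
 * Send the setxattr synchronously to the MDS.  A NULL value with
 * CEPH_XATTR_REPLACE turns this into an rmxattr; a non-empty value is
 * shipped in a pagelist attached to the request.
 */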
884 static int ceph_sync_setxattr(struct inode *inode, const char *name,
885 const char *value, size_t size, int flags)
886 {
887 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
888 struct ceph_inode_info *ci = ceph_inode(inode);
889 struct ceph_mds_request *req;
890 struct ceph_mds_client *mdsc = fsc->mdsc;
891 struct ceph_pagelist *pagelist = NULL;
892 int op = CEPH_MDS_OP_SETXATTR;
893 int err;
894
895 if (size > 0) {
896 /* copy value into pagelist */
897 pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
898 if (!pagelist)
899 return -ENOMEM;
900
901 ceph_pagelist_init(pagelist);
902 err = ceph_pagelist_append(pagelist, value, size);
903 if (err)
904 goto out;
905 } else if (!value) {
906 if (flags & CEPH_XATTR_REPLACE)
907 op = CEPH_MDS_OP_RMXATTR;
908 else
909 flags |= CEPH_XATTR_REMOVE;
910 }
911
912 dout("setxattr value=%.*s\n", (int)size, value);
913
914 /* do request */
915 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
916 if (IS_ERR(req)) {
917 err = PTR_ERR(req);
918 goto out;
919 }
920
921 req->r_path2 = kstrdup(name, GFP_NOFS);
922 if (!req->r_path2) {
923 ceph_mdsc_put_request(req);
924 err = -ENOMEM;
925 goto out;
926 }
927
928 if (op == CEPH_MDS_OP_SETXATTR) {
929 req->r_args.setxattr.flags = cpu_to_le32(flags);
930 req->r_pagelist = pagelist;
931 pagelist = NULL;
932 }
933
934 req->r_inode = inode;
935 ihold(inode);
936 req->r_num_caps = 1;
937 req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
938
939 dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
940 err = ceph_mdsc_do_request(mdsc, NULL, req);
941 ceph_mdsc_put_request(req);
942 dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);
943
944 out:
945 if (pagelist)
946 ceph_pagelist_release(pagelist);
947 return err;
948 }
949
950 int __ceph_setxattr(struct inode *inode, const char *name,
951 const void *value, size_t size, int flags)
952 {
953 struct ceph_vxattr *vxattr;
954 struct ceph_inode_info *ci = ceph_inode(inode);
955 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
956 struct ceph_cap_flush *prealloc_cf = NULL;
957 int issued;
958 int err;
959 int dirty = 0;
960 int name_len = strlen(name);
961 int val_len = size;
962 char *newname = NULL;
963 char *newval = NULL;
964 struct ceph_inode_xattr *xattr = NULL;
965 int required_blob_size;
966 bool lock_snap_rwsem = false;
967
968 if (ceph_snap(inode) != CEPH_NOSNAP)
969 return -EROFS;
970
971 vxattr = ceph_match_vxattr(inode, name);
972 if (vxattr && vxattr->readonly)
973 return -EOPNOTSUPP;
974
975 /* pass any unhandled ceph.* xattrs through to the MDS */
976 if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
977 goto do_sync_unlocked;
978
979 /* preallocate memory for xattr name, value, index node */
980 err = -ENOMEM;
981 newname = kmemdup(name, name_len + 1, GFP_NOFS);
982 if (!newname)
983 goto out;
984
985 if (val_len) {
986 newval = kmemdup(value, val_len, GFP_NOFS);
987 if (!newval)
988 goto out;
989 }
990
991 xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
992 if (!xattr)
993 goto out;
994
995 prealloc_cf = ceph_alloc_cap_flush();
996 if (!prealloc_cf)
997 goto out;
998
999 spin_lock(&ci->i_ceph_lock);
1000 retry:
1001 issued = __ceph_caps_issued(ci, NULL);
1002 if (ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))
1003 goto do_sync;
1004
1005 if (!lock_snap_rwsem && !ci->i_head_snapc) {
1006 lock_snap_rwsem = true;
1007 if (!down_read_trylock(&mdsc->snap_rwsem)) {
1008 spin_unlock(&ci->i_ceph_lock);
1009 down_read(&mdsc->snap_rwsem);
1010 spin_lock(&ci->i_ceph_lock);
1011 goto retry;
1012 }
1013 }
1014
1015 dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
1016 __build_xattrs(inode);
1017
1018 required_blob_size = __get_required_blob_size(ci, name_len, val_len);
1019
1020 if (!ci->i_xattrs.prealloc_blob ||
1021 required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
1022 struct ceph_buffer *blob;
1023
1024 spin_unlock(&ci->i_ceph_lock);
1025 dout(" preaallocating new blob size=%d\n", required_blob_size);
1026 blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
1027 if (!blob)
1028 goto do_sync_unlocked;
1029 spin_lock(&ci->i_ceph_lock);
1030 if (ci->i_xattrs.prealloc_blob)
1031 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
1032 ci->i_xattrs.prealloc_blob = blob;
1033 goto retry;
1034 }
1035
1036 err = __set_xattr(ci, newname, name_len, newval, val_len,
1037 flags, value ? 1 : -1, &xattr);
1038
1039 if (!err) {
1040 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL,
1041 &prealloc_cf);
1042 ci->i_xattrs.dirty = true;
1043 inode->i_ctime = current_time(inode);
1044 }
1045
1046 spin_unlock(&ci->i_ceph_lock);
1047 if (lock_snap_rwsem)
1048 up_read(&mdsc->snap_rwsem);
1049 if (dirty)
1050 __mark_inode_dirty(inode, dirty);
1051 ceph_free_cap_flush(prealloc_cf);
1052 return err;
1053
1054 do_sync:
1055 spin_unlock(&ci->i_ceph_lock);
1056 do_sync_unlocked:
1057 if (lock_snap_rwsem)
1058 up_read(&mdsc->snap_rwsem);
1059
1060 /* security module sets xattr while filling trace */
1061 if (current->journal_info != NULL) {
1062 pr_warn_ratelimited("sync setxattr %p "
1063 "during filling trace\n", inode);
1064 err = -EBUSY;
1065 } else {
1066 err = ceph_sync_setxattr(inode, name, value, size, flags);
1067 }
1068 out:
1069 ceph_free_cap_flush(prealloc_cf);
1070 kfree(newname);
1071 kfree(newval);
1072 kfree(xattr);
1073 return err;
1074 }
1075
1076 static int ceph_get_xattr_handler(const struct xattr_handler *handler,
1077 struct dentry *dentry, struct inode *inode,
1078 const char *name, void *value, size_t size)
1079 {
1080 if (!ceph_is_valid_xattr(name))
1081 return -EOPNOTSUPP;
1082 return __ceph_getxattr(inode, name, value, size);
1083 }
1084
1085 static int ceph_set_xattr_handler(const struct xattr_handler *handler,
1086 struct dentry *unused, struct inode *inode,
1087 const char *name, const void *value,
1088 size_t size, int flags)
1089 {
1090 if (!ceph_is_valid_xattr(name))
1091 return -EOPNOTSUPP;
1092 return __ceph_setxattr(inode, name, value, size, flags);
1093 }
1094
1095 static const struct xattr_handler ceph_other_xattr_handler = {
1096 .prefix = "", /* match any name => handlers called with full name */
1097 .get = ceph_get_xattr_handler,
1098 .set = ceph_set_xattr_handler,
1099 };
1100
1101 #ifdef CONFIG_SECURITY
1102 bool ceph_security_xattr_wanted(struct inode *in)
1103 {
1104 return in->i_security != NULL;
1105 }
1106
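/*
 * Return true when fetching a security xattr for this inode would need a
 * synchronous MDS round trip (no cached xattr data and no
 * CEPH_CAP_XATTR_SHARED cap), which is unsafe while a reply trace is
 * being filled.
 */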
1107 bool ceph_security_xattr_deadlock(struct inode *in)
1108 {
1109 struct ceph_inode_info *ci;
1110 bool ret;
1111 if (in->i_security == NULL)
1112 return false;
1113 ci = ceph_inode(in);
1114 spin_lock(&ci->i_ceph_lock);
1115 ret = !(ci->i_ceph_flags & CEPH_I_SEC_INITED) &&
1116 !(ci->i_xattrs.version > 0 &&
1117 __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0));
1118 spin_unlock(&ci->i_ceph_lock);
1119 return ret;
1120 }
1121 #endif