fs/ceph/xattr.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3 #include <linux/ceph/pagelist.h>
4
5 #include "super.h"
6 #include "mds_client.h"
7
8 #include <linux/ceph/decode.h>
9
10 #include <linux/xattr.h>
11 #include <linux/security.h>
12 #include <linux/posix_acl_xattr.h>
13 #include <linux/slab.h>
14
15 #define XATTR_CEPH_PREFIX "ceph."
16 #define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)
17
18 static int __remove_xattr(struct ceph_inode_info *ci,
19 struct ceph_inode_xattr *xattr);
20
21 static bool ceph_is_valid_xattr(const char *name)
22 {
23 return !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) ||
24 !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
25 !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
26 !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
27 }
28
29 /*
30 * These define virtual xattrs exposing the recursive directory
31 * statistics and layout metadata.
32 */
33 struct ceph_vxattr {
34 char *name;
35 size_t name_size; /* strlen(name) + 1 (for '\0') */
36 ssize_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
37 size_t size);
38 bool (*exists_cb)(struct ceph_inode_info *ci);
39 unsigned int flags;
40 };
41
42 #define VXATTR_FLAG_READONLY (1<<0)
43 #define VXATTR_FLAG_HIDDEN (1<<1)
44 #define VXATTR_FLAG_RSTAT (1<<2)
45 #define VXATTR_FLAG_DIRSTAT (1<<3)
46
47 /* layouts */
48
49 static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
50 {
51 struct ceph_file_layout *fl = &ci->i_layout;
52 return (fl->stripe_unit > 0 || fl->stripe_count > 0 ||
53 fl->object_size > 0 || fl->pool_id >= 0 ||
54 rcu_dereference_raw(fl->pool_ns) != NULL);
55 }
56
57 static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
58 size_t size)
59 {
60 struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
61 struct ceph_osd_client *osdc = &fsc->client->osdc;
62 struct ceph_string *pool_ns;
63 s64 pool = ci->i_layout.pool_id;
64 const char *pool_name;
65 const char *ns_field = " pool_namespace=";
66 char buf[128];
67 size_t len, total_len = 0;
68 ssize_t ret;
69
70 pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
71
72 dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
73 down_read(&osdc->lock);
74 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
75 if (pool_name) {
76 len = snprintf(buf, sizeof(buf),
77 "stripe_unit=%u stripe_count=%u object_size=%u pool=",
78 ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
79 ci->i_layout.object_size);
80 total_len = len + strlen(pool_name);
81 } else {
82 len = snprintf(buf, sizeof(buf),
83 "stripe_unit=%u stripe_count=%u object_size=%u pool=%lld",
84 ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
85 ci->i_layout.object_size, pool);
86 total_len = len;
87 }
88
89 if (pool_ns)
90 total_len += strlen(ns_field) + pool_ns->len;
91
92 ret = total_len;
93 if (size >= total_len) {
94 memcpy(val, buf, len);
95 ret = len;
96 if (pool_name) {
97 len = strlen(pool_name);
98 memcpy(val + ret, pool_name, len);
99 ret += len;
100 }
101 if (pool_ns) {
102 len = strlen(ns_field);
103 memcpy(val + ret, ns_field, len);
104 ret += len;
105 memcpy(val + ret, pool_ns->str, pool_ns->len);
106 ret += pool_ns->len;
107 }
108 }
109 up_read(&osdc->lock);
110 ceph_put_string(pool_ns);
111 return ret;
112 }
113
114 /*
115 * The convention with strings in xattrs is that they should not be NULL
116 * terminated, since we're returning the length with them. snprintf always
117 * NULL terminates however, so call it on a temporary buffer and then memcpy
118 * the result into place.
119 */
120 static __printf(3, 4)
121 int ceph_fmt_xattr(char *val, size_t size, const char *fmt, ...)
122 {
123 int ret;
124 va_list args;
125 char buf[96]; /* NB: reevaluate size if new vxattrs are added */
126
127 va_start(args, fmt);
128 ret = vsnprintf(buf, size ? sizeof(buf) : 0, fmt, args);
129 va_end(args);
130
131 /* Sanity check */
132 if (size && ret + 1 > sizeof(buf)) {
133 WARN_ONCE(true, "Returned length too big (%d)", ret);
134 return -E2BIG;
135 }
136
137 if (ret <= size)
138 memcpy(val, buf, ret);
139 return ret;
140 }
141
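/*
 * Illustrative sketch, not part of xattr.c: the length-return convention
 * implemented above is what lets userspace size its buffer with a
 * zero-length probe.  getxattr(2) with size == 0 returns the value length
 * without copying anything, and the returned value is not NUL-terminated,
 * so the caller terminates it itself.  The helper name and error handling
 * here are examples only.
 */
#include <stdlib.h>
#include <sys/types.h>
#include <sys/xattr.h>

static char *read_xattr_string(const char *path, const char *name)
{
	ssize_t len, ret;
	char *buf;

	len = getxattr(path, name, NULL, 0);	/* probe: length only */
	if (len < 0)
		return NULL;

	buf = malloc(len + 1);			/* +1 for our own NUL */
	if (!buf)
		return NULL;

	ret = getxattr(path, name, buf, len);	/* can fail with ERANGE if it grew */
	if (ret < 0) {
		free(buf);
		return NULL;
	}
	buf[ret] = '\0';
	return buf;
}
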
142 static ssize_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci,
143 char *val, size_t size)
144 {
145 return ceph_fmt_xattr(val, size, "%u", ci->i_layout.stripe_unit);
146 }
147
148 static ssize_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci,
149 char *val, size_t size)
150 {
151 return ceph_fmt_xattr(val, size, "%u", ci->i_layout.stripe_count);
152 }
153
154 static ssize_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci,
155 char *val, size_t size)
156 {
157 return ceph_fmt_xattr(val, size, "%u", ci->i_layout.object_size);
158 }
159
160 static ssize_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
161 char *val, size_t size)
162 {
163 ssize_t ret;
164 struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
165 struct ceph_osd_client *osdc = &fsc->client->osdc;
166 s64 pool = ci->i_layout.pool_id;
167 const char *pool_name;
168
169 down_read(&osdc->lock);
170 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
171 if (pool_name) {
172 ret = strlen(pool_name);
173 if (ret <= size)
174 memcpy(val, pool_name, ret);
175 } else {
176 ret = ceph_fmt_xattr(val, size, "%lld", pool);
177 }
178 up_read(&osdc->lock);
179 return ret;
180 }
181
182 static ssize_t ceph_vxattrcb_layout_pool_namespace(struct ceph_inode_info *ci,
183 char *val, size_t size)
184 {
185 ssize_t ret = 0;
186 struct ceph_string *ns = ceph_try_get_string(ci->i_layout.pool_ns);
187
188 if (ns) {
189 ret = ns->len;
190 if (ret <= size)
191 memcpy(val, ns->str, ret);
192 ceph_put_string(ns);
193 }
194 return ret;
195 }
196
197 /* directories */
198
199 static ssize_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
200 size_t size)
201 {
202 return ceph_fmt_xattr(val, size, "%lld", ci->i_files + ci->i_subdirs);
203 }
204
205 static ssize_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val,
206 size_t size)
207 {
208 return ceph_fmt_xattr(val, size, "%lld", ci->i_files);
209 }
210
211 static ssize_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val,
212 size_t size)
213 {
214 return ceph_fmt_xattr(val, size, "%lld", ci->i_subdirs);
215 }
216
217 static ssize_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val,
218 size_t size)
219 {
220 return ceph_fmt_xattr(val, size, "%lld",
221 ci->i_rfiles + ci->i_rsubdirs);
222 }
223
224 static ssize_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val,
225 size_t size)
226 {
227 return ceph_fmt_xattr(val, size, "%lld", ci->i_rfiles);
228 }
229
230 static ssize_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val,
231 size_t size)
232 {
233 return ceph_fmt_xattr(val, size, "%lld", ci->i_rsubdirs);
234 }
235
236 static ssize_t ceph_vxattrcb_dir_rsnaps(struct ceph_inode_info *ci, char *val,
237 size_t size)
238 {
239 return ceph_fmt_xattr(val, size, "%lld", ci->i_rsnaps);
240 }
241
242 static ssize_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
243 size_t size)
244 {
245 return ceph_fmt_xattr(val, size, "%lld", ci->i_rbytes);
246 }
247
248 static ssize_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
249 size_t size)
250 {
251 return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_rctime.tv_sec,
252 ci->i_rctime.tv_nsec);
253 }
254
255 /* dir pin */
256 static bool ceph_vxattrcb_dir_pin_exists(struct ceph_inode_info *ci)
257 {
258 return ci->i_dir_pin != -ENODATA;
259 }
260
261 static ssize_t ceph_vxattrcb_dir_pin(struct ceph_inode_info *ci, char *val,
262 size_t size)
263 {
264 return ceph_fmt_xattr(val, size, "%d", (int)ci->i_dir_pin);
265 }
266
267 /* quotas */
268 static bool ceph_vxattrcb_quota_exists(struct ceph_inode_info *ci)
269 {
270 bool ret = false;
271 spin_lock(&ci->i_ceph_lock);
272 if ((ci->i_max_files || ci->i_max_bytes) &&
273 ci->i_vino.snap == CEPH_NOSNAP &&
274 ci->i_snap_realm &&
275 ci->i_snap_realm->ino == ci->i_vino.ino)
276 ret = true;
277 spin_unlock(&ci->i_ceph_lock);
278 return ret;
279 }
280
281 static ssize_t ceph_vxattrcb_quota(struct ceph_inode_info *ci, char *val,
282 size_t size)
283 {
284 return ceph_fmt_xattr(val, size, "max_bytes=%llu max_files=%llu",
285 ci->i_max_bytes, ci->i_max_files);
286 }
287
288 static ssize_t ceph_vxattrcb_quota_max_bytes(struct ceph_inode_info *ci,
289 char *val, size_t size)
290 {
291 return ceph_fmt_xattr(val, size, "%llu", ci->i_max_bytes);
292 }
293
294 static ssize_t ceph_vxattrcb_quota_max_files(struct ceph_inode_info *ci,
295 char *val, size_t size)
296 {
297 return ceph_fmt_xattr(val, size, "%llu", ci->i_max_files);
298 }
299
300 /* snapshots */
301 static bool ceph_vxattrcb_snap_btime_exists(struct ceph_inode_info *ci)
302 {
303 return (ci->i_snap_btime.tv_sec != 0 || ci->i_snap_btime.tv_nsec != 0);
304 }
305
306 static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val,
307 size_t size)
308 {
309 return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_snap_btime.tv_sec,
310 ci->i_snap_btime.tv_nsec);
311 }
312
313 static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
314 char *val, size_t size)
315 {
316 struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
317
318 return ceph_fmt_xattr(val, size, "%pU", &fsc->client->fsid);
319 }
320
321 static ssize_t ceph_vxattrcb_client_id(struct ceph_inode_info *ci,
322 char *val, size_t size)
323 {
324 struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
325
326 return ceph_fmt_xattr(val, size, "client%lld",
327 ceph_client_gid(fsc->client));
328 }
329
330 static ssize_t ceph_vxattrcb_caps(struct ceph_inode_info *ci, char *val,
331 size_t size)
332 {
333 int issued;
334
335 spin_lock(&ci->i_ceph_lock);
336 issued = __ceph_caps_issued(ci, NULL);
337 spin_unlock(&ci->i_ceph_lock);
338
339 return ceph_fmt_xattr(val, size, "%s/0x%x",
340 ceph_cap_string(issued), issued);
341 }
342
343 static ssize_t ceph_vxattrcb_auth_mds(struct ceph_inode_info *ci,
344 char *val, size_t size)
345 {
346 int ret;
347
348 spin_lock(&ci->i_ceph_lock);
349 ret = ceph_fmt_xattr(val, size, "%d",
350 ci->i_auth_cap ? ci->i_auth_cap->session->s_mds : -1);
351 spin_unlock(&ci->i_ceph_lock);
352 return ret;
353 }
354
355 #define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name
356 #define CEPH_XATTR_NAME2(_type, _name, _name2) \
357 XATTR_CEPH_PREFIX #_type "." #_name "." #_name2
358
359 #define XATTR_NAME_CEPH(_type, _name, _flags) \
360 { \
361 .name = CEPH_XATTR_NAME(_type, _name), \
362 .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
363 .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
364 .exists_cb = NULL, \
365 .flags = (VXATTR_FLAG_READONLY | _flags), \
366 }
367 #define XATTR_RSTAT_FIELD(_type, _name) \
368 XATTR_NAME_CEPH(_type, _name, VXATTR_FLAG_RSTAT)
369 #define XATTR_LAYOUT_FIELD(_type, _name, _field) \
370 { \
371 .name = CEPH_XATTR_NAME2(_type, _name, _field), \
372 .name_size = sizeof (CEPH_XATTR_NAME2(_type, _name, _field)), \
373 .getxattr_cb = ceph_vxattrcb_ ## _name ## _ ## _field, \
374 .exists_cb = ceph_vxattrcb_layout_exists, \
375 .flags = VXATTR_FLAG_HIDDEN, \
376 }
377 #define XATTR_QUOTA_FIELD(_type, _name) \
378 { \
379 .name = CEPH_XATTR_NAME(_type, _name), \
380 .name_size = sizeof(CEPH_XATTR_NAME(_type, _name)), \
381 .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
382 .exists_cb = ceph_vxattrcb_quota_exists, \
383 .flags = VXATTR_FLAG_HIDDEN, \
384 }
385
386 static struct ceph_vxattr ceph_dir_vxattrs[] = {
387 {
388 .name = "ceph.dir.layout",
389 .name_size = sizeof("ceph.dir.layout"),
390 .getxattr_cb = ceph_vxattrcb_layout,
391 .exists_cb = ceph_vxattrcb_layout_exists,
392 .flags = VXATTR_FLAG_HIDDEN,
393 },
394 XATTR_LAYOUT_FIELD(dir, layout, stripe_unit),
395 XATTR_LAYOUT_FIELD(dir, layout, stripe_count),
396 XATTR_LAYOUT_FIELD(dir, layout, object_size),
397 XATTR_LAYOUT_FIELD(dir, layout, pool),
398 XATTR_LAYOUT_FIELD(dir, layout, pool_namespace),
399 XATTR_NAME_CEPH(dir, entries, VXATTR_FLAG_DIRSTAT),
400 XATTR_NAME_CEPH(dir, files, VXATTR_FLAG_DIRSTAT),
401 XATTR_NAME_CEPH(dir, subdirs, VXATTR_FLAG_DIRSTAT),
402 XATTR_RSTAT_FIELD(dir, rentries),
403 XATTR_RSTAT_FIELD(dir, rfiles),
404 XATTR_RSTAT_FIELD(dir, rsubdirs),
405 XATTR_RSTAT_FIELD(dir, rsnaps),
406 XATTR_RSTAT_FIELD(dir, rbytes),
407 XATTR_RSTAT_FIELD(dir, rctime),
408 {
409 .name = "ceph.dir.pin",
410 .name_size = sizeof("ceph.dir.pin"),
411 .getxattr_cb = ceph_vxattrcb_dir_pin,
412 .exists_cb = ceph_vxattrcb_dir_pin_exists,
413 .flags = VXATTR_FLAG_HIDDEN,
414 },
415 {
416 .name = "ceph.quota",
417 .name_size = sizeof("ceph.quota"),
418 .getxattr_cb = ceph_vxattrcb_quota,
419 .exists_cb = ceph_vxattrcb_quota_exists,
420 .flags = VXATTR_FLAG_HIDDEN,
421 },
422 XATTR_QUOTA_FIELD(quota, max_bytes),
423 XATTR_QUOTA_FIELD(quota, max_files),
424 {
425 .name = "ceph.snap.btime",
426 .name_size = sizeof("ceph.snap.btime"),
427 .getxattr_cb = ceph_vxattrcb_snap_btime,
428 .exists_cb = ceph_vxattrcb_snap_btime_exists,
429 .flags = VXATTR_FLAG_READONLY,
430 },
431 {
432 .name = "ceph.caps",
433 .name_size = sizeof("ceph.caps"),
434 .getxattr_cb = ceph_vxattrcb_caps,
435 .exists_cb = NULL,
436 .flags = VXATTR_FLAG_HIDDEN,
437 },
438 { .name = NULL, 0 } /* Required table terminator */
439 };
440
441 /* files */
442
443 static struct ceph_vxattr ceph_file_vxattrs[] = {
444 {
445 .name = "ceph.file.layout",
446 .name_size = sizeof("ceph.file.layout"),
447 .getxattr_cb = ceph_vxattrcb_layout,
448 .exists_cb = ceph_vxattrcb_layout_exists,
449 .flags = VXATTR_FLAG_HIDDEN,
450 },
451 XATTR_LAYOUT_FIELD(file, layout, stripe_unit),
452 XATTR_LAYOUT_FIELD(file, layout, stripe_count),
453 XATTR_LAYOUT_FIELD(file, layout, object_size),
454 XATTR_LAYOUT_FIELD(file, layout, pool),
455 XATTR_LAYOUT_FIELD(file, layout, pool_namespace),
456 {
457 .name = "ceph.snap.btime",
458 .name_size = sizeof("ceph.snap.btime"),
459 .getxattr_cb = ceph_vxattrcb_snap_btime,
460 .exists_cb = ceph_vxattrcb_snap_btime_exists,
461 .flags = VXATTR_FLAG_READONLY,
462 },
463 {
464 .name = "ceph.caps",
465 .name_size = sizeof("ceph.caps"),
466 .getxattr_cb = ceph_vxattrcb_caps,
467 .exists_cb = NULL,
468 .flags = VXATTR_FLAG_HIDDEN,
469 },
470 { .name = NULL, 0 } /* Required table terminator */
471 };
472
473 static struct ceph_vxattr ceph_common_vxattrs[] = {
474 {
475 .name = "ceph.cluster_fsid",
476 .name_size = sizeof("ceph.cluster_fsid"),
477 .getxattr_cb = ceph_vxattrcb_cluster_fsid,
478 .exists_cb = NULL,
479 .flags = VXATTR_FLAG_READONLY,
480 },
481 {
482 .name = "ceph.client_id",
483 .name_size = sizeof("ceph.client_id"),
484 .getxattr_cb = ceph_vxattrcb_client_id,
485 .exists_cb = NULL,
486 .flags = VXATTR_FLAG_READONLY,
487 },
488 {
489 .name = "ceph.auth_mds",
490 .name_size = sizeof("ceph.auth_mds"),
491 .getxattr_cb = ceph_vxattrcb_auth_mds,
492 .exists_cb = NULL,
493 .flags = VXATTR_FLAG_READONLY,
494 },
495 { .name = NULL, 0 } /* Required table terminator */
496 };
497
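/*
 * Illustrative sketch, not part of xattr.c: the tables above are exposed to
 * userspace through the ordinary xattr syscalls, so a small tool can read
 * the recursive directory statistics and the common client/cluster vxattrs
 * by name.  The helper name is made up and the argument must be a directory
 * on a CephFS mount.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/xattr.h>

static void dump_dir_vxattrs(const char *dir)
{
	static const char * const names[] = {
		"ceph.dir.entries",	/* files + subdirs in this directory */
		"ceph.dir.rentries",	/* recursive entry count */
		"ceph.dir.rbytes",	/* recursive byte count */
		"ceph.dir.rctime",	/* recursive change time */
		"ceph.cluster_fsid",	/* from ceph_common_vxattrs */
		"ceph.client_id",
	};
	char val[128];
	ssize_t len;
	size_t i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		len = getxattr(dir, names[i], val, sizeof(val) - 1);
		if (len < 0) {
			perror(names[i]);
			continue;
		}
		val[len] = '\0';	/* values are not NUL-terminated */
		printf("%s = %s\n", names[i], val);
	}
}
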
498 static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode)
499 {
500 if (S_ISDIR(inode->i_mode))
501 return ceph_dir_vxattrs;
502 else if (S_ISREG(inode->i_mode))
503 return ceph_file_vxattrs;
504 return NULL;
505 }
506
507 static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
508 const char *name)
509 {
510 struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode);
511
512 if (vxattr) {
513 while (vxattr->name) {
514 if (!strcmp(vxattr->name, name))
515 return vxattr;
516 vxattr++;
517 }
518 }
519
520 vxattr = ceph_common_vxattrs;
521 while (vxattr->name) {
522 if (!strcmp(vxattr->name, name))
523 return vxattr;
524 vxattr++;
525 }
526
527 return NULL;
528 }
529
530 static int __set_xattr(struct ceph_inode_info *ci,
531 const char *name, int name_len,
532 const char *val, int val_len,
533 int flags, int update_xattr,
534 struct ceph_inode_xattr **newxattr)
535 {
536 struct rb_node **p;
537 struct rb_node *parent = NULL;
538 struct ceph_inode_xattr *xattr = NULL;
539 int c;
540 int new = 0;
541
542 p = &ci->i_xattrs.index.rb_node;
543 while (*p) {
544 parent = *p;
545 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
546 c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
547 if (c < 0)
548 p = &(*p)->rb_left;
549 else if (c > 0)
550 p = &(*p)->rb_right;
551 else {
552 if (name_len == xattr->name_len)
553 break;
554 else if (name_len < xattr->name_len)
555 p = &(*p)->rb_left;
556 else
557 p = &(*p)->rb_right;
558 }
559 xattr = NULL;
560 }
561
562 if (update_xattr) {
563 int err = 0;
564
565 if (xattr && (flags & XATTR_CREATE))
566 err = -EEXIST;
567 else if (!xattr && (flags & XATTR_REPLACE))
568 err = -ENODATA;
569 if (err) {
570 kfree(name);
571 kfree(val);
572 kfree(*newxattr);
573 return err;
574 }
575 if (update_xattr < 0) {
576 if (xattr)
577 __remove_xattr(ci, xattr);
578 kfree(name);
579 kfree(*newxattr);
580 return 0;
581 }
582 }
583
584 if (!xattr) {
585 new = 1;
586 xattr = *newxattr;
587 xattr->name = name;
588 xattr->name_len = name_len;
589 xattr->should_free_name = update_xattr;
590
591 ci->i_xattrs.count++;
592 dout("__set_xattr count=%d\n", ci->i_xattrs.count);
593 } else {
594 kfree(*newxattr);
595 *newxattr = NULL;
596 if (xattr->should_free_val)
597 kfree(xattr->val);
598
599 if (update_xattr) {
600 kfree(name);
601 name = xattr->name;
602 }
603 ci->i_xattrs.names_size -= xattr->name_len;
604 ci->i_xattrs.vals_size -= xattr->val_len;
605 }
606 ci->i_xattrs.names_size += name_len;
607 ci->i_xattrs.vals_size += val_len;
608 if (val)
609 xattr->val = val;
610 else
611 xattr->val = "";
612
613 xattr->val_len = val_len;
614 xattr->dirty = update_xattr;
615 xattr->should_free_val = (val && update_xattr);
616
617 if (new) {
618 rb_link_node(&xattr->node, parent, p);
619 rb_insert_color(&xattr->node, &ci->i_xattrs.index);
620 dout("__set_xattr_val p=%p\n", p);
621 }
622
623 dout("__set_xattr_val added %llx.%llx xattr %p %.*s=%.*s\n",
624 ceph_vinop(&ci->vfs_inode), xattr, name_len, name, val_len, val);
625
626 return 0;
627 }
628
629 static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
630 const char *name)
631 {
632 struct rb_node **p;
633 struct rb_node *parent = NULL;
634 struct ceph_inode_xattr *xattr = NULL;
635 int name_len = strlen(name);
636 int c;
637
638 p = &ci->i_xattrs.index.rb_node;
639 while (*p) {
640 parent = *p;
641 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
642 c = strncmp(name, xattr->name, xattr->name_len);
643 if (c == 0 && name_len > xattr->name_len)
644 c = 1;
645 if (c < 0)
646 p = &(*p)->rb_left;
647 else if (c > 0)
648 p = &(*p)->rb_right;
649 else {
650 dout("__get_xattr %s: found %.*s\n", name,
651 xattr->val_len, xattr->val);
652 return xattr;
653 }
654 }
655
656 dout("__get_xattr %s: not found\n", name);
657
658 return NULL;
659 }
660
661 static void __free_xattr(struct ceph_inode_xattr *xattr)
662 {
663 BUG_ON(!xattr);
664
665 if (xattr->should_free_name)
666 kfree(xattr->name);
667 if (xattr->should_free_val)
668 kfree(xattr->val);
669
670 kfree(xattr);
671 }
672
673 static int __remove_xattr(struct ceph_inode_info *ci,
674 struct ceph_inode_xattr *xattr)
675 {
676 if (!xattr)
677 return -ENODATA;
678
679 rb_erase(&xattr->node, &ci->i_xattrs.index);
680
681 if (xattr->should_free_name)
682 kfree(xattr->name);
683 if (xattr->should_free_val)
684 kfree(xattr->val);
685
686 ci->i_xattrs.names_size -= xattr->name_len;
687 ci->i_xattrs.vals_size -= xattr->val_len;
688 ci->i_xattrs.count--;
689 kfree(xattr);
690
691 return 0;
692 }
693
694 static char *__copy_xattr_names(struct ceph_inode_info *ci,
695 char *dest)
696 {
697 struct rb_node *p;
698 struct ceph_inode_xattr *xattr = NULL;
699
700 p = rb_first(&ci->i_xattrs.index);
701 dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);
702
703 while (p) {
704 xattr = rb_entry(p, struct ceph_inode_xattr, node);
705 memcpy(dest, xattr->name, xattr->name_len);
706 dest[xattr->name_len] = '\0';
707
708 dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
709 xattr->name_len, ci->i_xattrs.names_size);
710
711 dest += xattr->name_len + 1;
712 p = rb_next(p);
713 }
714
715 return dest;
716 }
717
718 void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
719 {
720 struct rb_node *p, *tmp;
721 struct ceph_inode_xattr *xattr = NULL;
722
723 p = rb_first(&ci->i_xattrs.index);
724
725 dout("__ceph_destroy_xattrs p=%p\n", p);
726
727 while (p) {
728 xattr = rb_entry(p, struct ceph_inode_xattr, node);
729 tmp = p;
730 p = rb_next(tmp);
731 dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
732 xattr->name_len, xattr->name);
733 rb_erase(tmp, &ci->i_xattrs.index);
734
735 __free_xattr(xattr);
736 }
737
738 ci->i_xattrs.names_size = 0;
739 ci->i_xattrs.vals_size = 0;
740 ci->i_xattrs.index_version = 0;
741 ci->i_xattrs.count = 0;
742 ci->i_xattrs.index = RB_ROOT;
743 }
744
745 static int __build_xattrs(struct inode *inode)
746 __releases(ci->i_ceph_lock)
747 __acquires(ci->i_ceph_lock)
748 {
749 u32 namelen;
750 u32 numattr = 0;
751 void *p, *end;
752 u32 len;
753 const char *name, *val;
754 struct ceph_inode_info *ci = ceph_inode(inode);
755 u64 xattr_version;
756 struct ceph_inode_xattr **xattrs = NULL;
757 int err = 0;
758 int i;
759
760 dout("__build_xattrs() len=%d\n",
761 ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
762
763 if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
764 return 0; /* already built */
765
766 __ceph_destroy_xattrs(ci);
767
768 start:
769 /* update the internal xattr rb tree */
770 if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
771 p = ci->i_xattrs.blob->vec.iov_base;
772 end = p + ci->i_xattrs.blob->vec.iov_len;
773 ceph_decode_32_safe(&p, end, numattr, bad);
774 xattr_version = ci->i_xattrs.version;
775 spin_unlock(&ci->i_ceph_lock);
776
777 xattrs = kcalloc(numattr, sizeof(struct ceph_inode_xattr *),
778 GFP_NOFS);
779 err = -ENOMEM;
780 if (!xattrs)
781 goto bad_lock;
782
783 for (i = 0; i < numattr; i++) {
784 xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
785 GFP_NOFS);
786 if (!xattrs[i])
787 goto bad_lock;
788 }
789
790 spin_lock(&ci->i_ceph_lock);
791 if (ci->i_xattrs.version != xattr_version) {
792 /* lost a race, retry */
793 for (i = 0; i < numattr; i++)
794 kfree(xattrs[i]);
795 kfree(xattrs);
796 xattrs = NULL;
797 goto start;
798 }
799 err = -EIO;
800 while (numattr--) {
801 ceph_decode_32_safe(&p, end, len, bad);
802 namelen = len;
803 name = p;
804 p += len;
805 ceph_decode_32_safe(&p, end, len, bad);
806 val = p;
807 p += len;
808
809 err = __set_xattr(ci, name, namelen, val, len,
810 0, 0, &xattrs[numattr]);
811
812 if (err < 0)
813 goto bad;
814 }
815 kfree(xattrs);
816 }
817 ci->i_xattrs.index_version = ci->i_xattrs.version;
818 ci->i_xattrs.dirty = false;
819
820 return err;
821 bad_lock:
822 spin_lock(&ci->i_ceph_lock);
823 bad:
824 if (xattrs) {
825 for (i = 0; i < numattr; i++)
826 kfree(xattrs[i]);
827 kfree(xattrs);
828 }
829 ci->i_xattrs.names_size = 0;
830 return err;
831 }
832
833 static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
834 int val_size)
835 {
836 /*
837 * 4 bytes for the xattr count, plus, for each xattr, 4 bytes for the
838 * name length and 4 bytes for the value length
839 */
840 int size = 4 + ci->i_xattrs.count*(4 + 4) +
841 ci->i_xattrs.names_size +
842 ci->i_xattrs.vals_size;
843 dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
844 ci->i_xattrs.count, ci->i_xattrs.names_size,
845 ci->i_xattrs.vals_size);
846
847 if (name_size)
848 size += 4 + 4 + name_size + val_size;
849
850 return size;
851 }
852
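/*
 * Worked example for the formula above, with illustrative numbers only:
 * caching "user.one"="abc" and "user.two"="hello" gives count = 2,
 * names_size = 8 + 8 = 16 and vals_size = 3 + 5 = 8, so the blob needs
 * 4 + 2 * (4 + 4) + 16 + 8 = 44 bytes; setting one more xattr with an
 * 8-byte name and a 4-byte value asks for 4 + 4 + 8 + 4 = 20 bytes more.
 */
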
853 /*
854 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
855 * and swap into place. It returns the old i_xattrs.blob (or NULL) so
856 * that it can be freed by the caller as the i_ceph_lock is likely to be
857 * held.
858 */
859 struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
860 {
861 struct rb_node *p;
862 struct ceph_inode_xattr *xattr = NULL;
863 struct ceph_buffer *old_blob = NULL;
864 void *dest;
865
866 dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
867 if (ci->i_xattrs.dirty) {
868 int need = __get_required_blob_size(ci, 0, 0);
869
870 BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);
871
872 p = rb_first(&ci->i_xattrs.index);
873 dest = ci->i_xattrs.prealloc_blob->vec.iov_base;
874
875 ceph_encode_32(&dest, ci->i_xattrs.count);
876 while (p) {
877 xattr = rb_entry(p, struct ceph_inode_xattr, node);
878
879 ceph_encode_32(&dest, xattr->name_len);
880 memcpy(dest, xattr->name, xattr->name_len);
881 dest += xattr->name_len;
882 ceph_encode_32(&dest, xattr->val_len);
883 memcpy(dest, xattr->val, xattr->val_len);
884 dest += xattr->val_len;
885
886 p = rb_next(p);
887 }
888
889 /* adjust buffer len; it may be larger than we need */
890 ci->i_xattrs.prealloc_blob->vec.iov_len =
891 dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
892
893 if (ci->i_xattrs.blob)
894 old_blob = ci->i_xattrs.blob;
895 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
896 ci->i_xattrs.prealloc_blob = NULL;
897 ci->i_xattrs.dirty = false;
898 ci->i_xattrs.version++;
899 }
900
901 return old_blob;
902 }
903
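/*
 * Illustrative sketch, not part of xattr.c: the buffer produced by
 * __ceph_build_xattrs_blob() above (and parsed by __build_xattrs()) is a
 * little-endian u32 xattr count followed, for each xattr, by a u32 name
 * length, the name bytes, a u32 value length and the value bytes.  A
 * minimal standalone decoder of such a buffer might look like this;
 * values may be binary but are printed as text here for simplicity.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t get_le32(const unsigned char *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static int decode_xattr_blob(const unsigned char *p, size_t len)
{
	const unsigned char *end = p + len;
	uint32_t count, nlen, vlen;

	if (len < 4)
		return -1;
	count = get_le32(p);
	p += 4;

	while (count--) {
		if (end - p < 4)
			return -1;
		nlen = get_le32(p);
		p += 4;
		if ((size_t)(end - p) < nlen)
			return -1;
		printf("name: %.*s, ", (int)nlen, p);
		p += nlen;

		if (end - p < 4)
			return -1;
		vlen = get_le32(p);
		p += 4;
		if ((size_t)(end - p) < vlen)
			return -1;
		printf("value: %.*s\n", (int)vlen, p);
		p += vlen;
	}
	return 0;
}
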
904 static inline int __get_request_mask(struct inode *in) {
905 struct ceph_mds_request *req = current->journal_info;
906 int mask = 0;
907 if (req && req->r_target_inode == in) {
908 if (req->r_op == CEPH_MDS_OP_LOOKUP ||
909 req->r_op == CEPH_MDS_OP_LOOKUPINO ||
910 req->r_op == CEPH_MDS_OP_LOOKUPPARENT ||
911 req->r_op == CEPH_MDS_OP_GETATTR) {
912 mask = le32_to_cpu(req->r_args.getattr.mask);
913 } else if (req->r_op == CEPH_MDS_OP_OPEN ||
914 req->r_op == CEPH_MDS_OP_CREATE) {
915 mask = le32_to_cpu(req->r_args.open.mask);
916 }
917 }
918 return mask;
919 }
920
921 ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
922 size_t size)
923 {
924 struct ceph_inode_info *ci = ceph_inode(inode);
925 struct ceph_inode_xattr *xattr;
926 struct ceph_vxattr *vxattr = NULL;
927 int req_mask;
928 ssize_t err;
929
930 /* let's see if a virtual xattr was requested */
931 vxattr = ceph_match_vxattr(inode, name);
932 if (vxattr) {
933 int mask = 0;
934 if (vxattr->flags & VXATTR_FLAG_RSTAT)
935 mask |= CEPH_STAT_RSTAT;
936 if (vxattr->flags & VXATTR_FLAG_DIRSTAT)
937 mask |= CEPH_CAP_FILE_SHARED;
938 err = ceph_do_getattr(inode, mask, true);
939 if (err)
940 return err;
941 err = -ENODATA;
942 if (!(vxattr->exists_cb && !vxattr->exists_cb(ci))) {
943 err = vxattr->getxattr_cb(ci, value, size);
944 if (size && size < err)
945 err = -ERANGE;
946 }
947 return err;
948 }
949
950 req_mask = __get_request_mask(inode);
951
952 spin_lock(&ci->i_ceph_lock);
953 dout("getxattr %p name '%s' ver=%lld index_ver=%lld\n", inode, name,
954 ci->i_xattrs.version, ci->i_xattrs.index_version);
955
956 if (ci->i_xattrs.version == 0 ||
957 !((req_mask & CEPH_CAP_XATTR_SHARED) ||
958 __ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1))) {
959 spin_unlock(&ci->i_ceph_lock);
960
961 /* security module gets xattr while filling trace */
962 if (current->journal_info) {
963 pr_warn_ratelimited("sync getxattr %p "
964 "during filling trace\n", inode);
965 return -EBUSY;
966 }
967
968 /* get xattrs from mds (if we don't already have them) */
969 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
970 if (err)
971 return err;
972 spin_lock(&ci->i_ceph_lock);
973 }
974
975 err = __build_xattrs(inode);
976 if (err < 0)
977 goto out;
978
979 err = -ENODATA; /* == ENOATTR */
980 xattr = __get_xattr(ci, name);
981 if (!xattr)
982 goto out;
983
984 err = -ERANGE;
985 if (size && size < xattr->val_len)
986 goto out;
987
988 err = xattr->val_len;
989 if (size == 0)
990 goto out;
991
992 memcpy(value, xattr->val, xattr->val_len);
993
994 if (current->journal_info &&
995 !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
996 security_ismaclabel(name + XATTR_SECURITY_PREFIX_LEN))
997 ci->i_ceph_flags |= CEPH_I_SEC_INITED;
998 out:
999 spin_unlock(&ci->i_ceph_lock);
1000 return err;
1001 }
1002
1003 ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
1004 {
1005 struct inode *inode = d_inode(dentry);
1006 struct ceph_inode_info *ci = ceph_inode(inode);
1007 bool len_only = (size == 0);
1008 u32 namelen;
1009 int err;
1010
1011 spin_lock(&ci->i_ceph_lock);
1012 dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
1013 ci->i_xattrs.version, ci->i_xattrs.index_version);
1014
1015 if (ci->i_xattrs.version == 0 ||
1016 !__ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1)) {
1017 spin_unlock(&ci->i_ceph_lock);
1018 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
1019 if (err)
1020 return err;
1021 spin_lock(&ci->i_ceph_lock);
1022 }
1023
1024 err = __build_xattrs(inode);
1025 if (err < 0)
1026 goto out;
1027
1028 /* add 1 byte for each xattr due to the null termination */
1029 namelen = ci->i_xattrs.names_size + ci->i_xattrs.count;
1030 if (!len_only) {
1031 if (namelen > size) {
1032 err = -ERANGE;
1033 goto out;
1034 }
1035 names = __copy_xattr_names(ci, names);
1036 size -= namelen;
1037 }
1038 err = namelen;
1039 out:
1040 spin_unlock(&ci->i_ceph_lock);
1041 return err;
1042 }
1043
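/*
 * Illustrative sketch, not part of xattr.c: ceph_listxattr() above returns
 * the names packed one after another, each terminated by '\0', which is the
 * standard listxattr(2) format.  Userspace walks the buffer like this; the
 * helper name and error handling are examples only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/xattr.h>

static int print_xattr_names(const char *path)
{
	ssize_t len;
	char *buf, *name;

	len = listxattr(path, NULL, 0);		/* probe the required size */
	if (len <= 0)
		return (int)len;

	buf = malloc(len);
	if (!buf)
		return -1;

	len = listxattr(path, buf, len);
	if (len < 0) {
		free(buf);
		return -1;
	}

	for (name = buf; name < buf + len; name += strlen(name) + 1)
		printf("%s\n", name);

	free(buf);
	return 0;
}
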
1044 static int ceph_sync_setxattr(struct inode *inode, const char *name,
1045 const char *value, size_t size, int flags)
1046 {
1047 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
1048 struct ceph_inode_info *ci = ceph_inode(inode);
1049 struct ceph_mds_request *req;
1050 struct ceph_mds_client *mdsc = fsc->mdsc;
1051 struct ceph_osd_client *osdc = &fsc->client->osdc;
1052 struct ceph_pagelist *pagelist = NULL;
1053 int op = CEPH_MDS_OP_SETXATTR;
1054 int err;
1055
1056 if (size > 0) {
1057 /* copy value into pagelist */
1058 pagelist = ceph_pagelist_alloc(GFP_NOFS);
1059 if (!pagelist)
1060 return -ENOMEM;
1061
1062 err = ceph_pagelist_append(pagelist, value, size);
1063 if (err)
1064 goto out;
1065 } else if (!value) {
1066 if (flags & CEPH_XATTR_REPLACE)
1067 op = CEPH_MDS_OP_RMXATTR;
1068 else
1069 flags |= CEPH_XATTR_REMOVE;
1070 }
1071
1072 dout("setxattr value=%.*s\n", (int)size, value);
1073
1074 /* do request */
1075 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
1076 if (IS_ERR(req)) {
1077 err = PTR_ERR(req);
1078 goto out;
1079 }
1080
1081 req->r_path2 = kstrdup(name, GFP_NOFS);
1082 if (!req->r_path2) {
1083 ceph_mdsc_put_request(req);
1084 err = -ENOMEM;
1085 goto out;
1086 }
1087
1088 if (op == CEPH_MDS_OP_SETXATTR) {
1089 req->r_args.setxattr.flags = cpu_to_le32(flags);
1090 req->r_args.setxattr.osdmap_epoch =
1091 cpu_to_le32(osdc->osdmap->epoch);
1092 req->r_pagelist = pagelist;
1093 pagelist = NULL;
1094 }
1095
1096 req->r_inode = inode;
1097 ihold(inode);
1098 req->r_num_caps = 1;
1099 req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
1100
1101 dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
1102 err = ceph_mdsc_do_request(mdsc, NULL, req);
1103 ceph_mdsc_put_request(req);
1104 dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);
1105
1106 out:
1107 if (pagelist)
1108 ceph_pagelist_release(pagelist);
1109 return err;
1110 }
1111
1112 int __ceph_setxattr(struct inode *inode, const char *name,
1113 const void *value, size_t size, int flags)
1114 {
1115 struct ceph_vxattr *vxattr;
1116 struct ceph_inode_info *ci = ceph_inode(inode);
1117 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1118 struct ceph_cap_flush *prealloc_cf = NULL;
1119 struct ceph_buffer *old_blob = NULL;
1120 int issued;
1121 int err;
1122 int dirty = 0;
1123 int name_len = strlen(name);
1124 int val_len = size;
1125 char *newname = NULL;
1126 char *newval = NULL;
1127 struct ceph_inode_xattr *xattr = NULL;
1128 int required_blob_size;
1129 bool check_realm = false;
1130 bool lock_snap_rwsem = false;
1131
1132 if (ceph_snap(inode) != CEPH_NOSNAP)
1133 return -EROFS;
1134
1135 vxattr = ceph_match_vxattr(inode, name);
1136 if (vxattr) {
1137 if (vxattr->flags & VXATTR_FLAG_READONLY)
1138 return -EOPNOTSUPP;
1139 if (value && !strncmp(vxattr->name, "ceph.quota", 10))
1140 check_realm = true;
1141 }
1142
1143 /* pass any unhandled ceph.* xattrs through to the MDS */
1144 if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
1145 goto do_sync_unlocked;
1146
1147 /* preallocate memory for xattr name, value, index node */
1148 err = -ENOMEM;
1149 newname = kmemdup(name, name_len + 1, GFP_NOFS);
1150 if (!newname)
1151 goto out;
1152
1153 if (val_len) {
1154 newval = kmemdup(value, val_len, GFP_NOFS);
1155 if (!newval)
1156 goto out;
1157 }
1158
1159 xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
1160 if (!xattr)
1161 goto out;
1162
1163 prealloc_cf = ceph_alloc_cap_flush();
1164 if (!prealloc_cf)
1165 goto out;
1166
1167 spin_lock(&ci->i_ceph_lock);
1168 retry:
1169 issued = __ceph_caps_issued(ci, NULL);
1170 if (ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))
1171 goto do_sync;
1172
1173 if (!lock_snap_rwsem && !ci->i_head_snapc) {
1174 lock_snap_rwsem = true;
1175 if (!down_read_trylock(&mdsc->snap_rwsem)) {
1176 spin_unlock(&ci->i_ceph_lock);
1177 down_read(&mdsc->snap_rwsem);
1178 spin_lock(&ci->i_ceph_lock);
1179 goto retry;
1180 }
1181 }
1182
1183 dout("setxattr %p name '%s' issued %s\n", inode, name,
1184 ceph_cap_string(issued));
1185 __build_xattrs(inode);
1186
1187 required_blob_size = __get_required_blob_size(ci, name_len, val_len);
1188
1189 if (!ci->i_xattrs.prealloc_blob ||
1190 required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
1191 struct ceph_buffer *blob;
1192
1193 spin_unlock(&ci->i_ceph_lock);
1194 ceph_buffer_put(old_blob); /* Shouldn't be required */
1195 dout(" pre-allocating new blob size=%d\n", required_blob_size);
1196 blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
1197 if (!blob)
1198 goto do_sync_unlocked;
1199 spin_lock(&ci->i_ceph_lock);
1200 /* prealloc_blob can't be released while holding i_ceph_lock */
1201 if (ci->i_xattrs.prealloc_blob)
1202 old_blob = ci->i_xattrs.prealloc_blob;
1203 ci->i_xattrs.prealloc_blob = blob;
1204 goto retry;
1205 }
1206
1207 err = __set_xattr(ci, newname, name_len, newval, val_len,
1208 flags, value ? 1 : -1, &xattr);
1209
1210 if (!err) {
1211 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL,
1212 &prealloc_cf);
1213 ci->i_xattrs.dirty = true;
1214 inode->i_ctime = current_time(inode);
1215 }
1216
1217 spin_unlock(&ci->i_ceph_lock);
1218 ceph_buffer_put(old_blob);
1219 if (lock_snap_rwsem)
1220 up_read(&mdsc->snap_rwsem);
1221 if (dirty)
1222 __mark_inode_dirty(inode, dirty);
1223 ceph_free_cap_flush(prealloc_cf);
1224 return err;
1225
1226 do_sync:
1227 spin_unlock(&ci->i_ceph_lock);
1228 do_sync_unlocked:
1229 if (lock_snap_rwsem)
1230 up_read(&mdsc->snap_rwsem);
1231
1232 /* security module set xattr while filling trace */
1233 if (current->journal_info) {
1234 pr_warn_ratelimited("sync setxattr %p "
1235 "during filling trace\n", inode);
1236 err = -EBUSY;
1237 } else {
1238 err = ceph_sync_setxattr(inode, name, value, size, flags);
1239 if (err >= 0 && check_realm) {
1240 /* check if snaprealm was created for quota inode */
1241 spin_lock(&ci->i_ceph_lock);
1242 if ((ci->i_max_files || ci->i_max_bytes) &&
1243 !(ci->i_snap_realm &&
1244 ci->i_snap_realm->ino == ci->i_vino.ino))
1245 err = -EOPNOTSUPP;
1246 spin_unlock(&ci->i_ceph_lock);
1247 }
1248 }
1249 out:
1250 ceph_free_cap_flush(prealloc_cf);
1251 kfree(newname);
1252 kfree(newval);
1253 kfree(xattr);
1254 return err;
1255 }
1256
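/*
 * Illustrative sketch, not part of xattr.c: __ceph_setxattr() above is what
 * services a plain setxattr(2) on a CephFS mount, so directory quotas can be
 * managed from userspace by writing the ceph.quota.* vxattrs as decimal
 * strings.  The helper names are made up, the caller needs the usual
 * permissions, and (per the Ceph documentation) writing "0" clears a limit.
 */
#include <stdio.h>
#include <sys/xattr.h>

static int set_dir_quota_bytes(const char *dir, unsigned long long bytes)
{
	char val[32];
	int len;

	len = snprintf(val, sizeof(val), "%llu", bytes);
	/* the value is a plain decimal string with no trailing NUL */
	return setxattr(dir, "ceph.quota.max_bytes", val, len, 0);
}

static int clear_dir_quota_bytes(const char *dir)
{
	/* a value of "0" removes the byte limit again */
	return setxattr(dir, "ceph.quota.max_bytes", "0", 1, 0);
}
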
1257 static int ceph_get_xattr_handler(const struct xattr_handler *handler,
1258 struct dentry *dentry, struct inode *inode,
1259 const char *name, void *value, size_t size)
1260 {
1261 if (!ceph_is_valid_xattr(name))
1262 return -EOPNOTSUPP;
1263 return __ceph_getxattr(inode, name, value, size);
1264 }
1265
1266 static int ceph_set_xattr_handler(const struct xattr_handler *handler,
1267 struct user_namespace *mnt_userns,
1268 struct dentry *unused, struct inode *inode,
1269 const char *name, const void *value,
1270 size_t size, int flags)
1271 {
1272 if (!ceph_is_valid_xattr(name))
1273 return -EOPNOTSUPP;
1274 return __ceph_setxattr(inode, name, value, size, flags);
1275 }
1276
1277 static const struct xattr_handler ceph_other_xattr_handler = {
1278 .prefix = "", /* match any name => handlers called with full name */
1279 .get = ceph_get_xattr_handler,
1280 .set = ceph_set_xattr_handler,
1281 };
1282
1283 #ifdef CONFIG_SECURITY
1284 bool ceph_security_xattr_wanted(struct inode *in)
1285 {
1286 return in->i_security != NULL;
1287 }
1288
1289 bool ceph_security_xattr_deadlock(struct inode *in)
1290 {
1291 struct ceph_inode_info *ci;
1292 bool ret;
1293 if (!in->i_security)
1294 return false;
1295 ci = ceph_inode(in);
1296 spin_lock(&ci->i_ceph_lock);
1297 ret = !(ci->i_ceph_flags & CEPH_I_SEC_INITED) &&
1298 !(ci->i_xattrs.version > 0 &&
1299 __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0));
1300 spin_unlock(&ci->i_ceph_lock);
1301 return ret;
1302 }
1303
1304 #ifdef CONFIG_CEPH_FS_SECURITY_LABEL
1305 int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
1306 struct ceph_acl_sec_ctx *as_ctx)
1307 {
1308 struct ceph_pagelist *pagelist = as_ctx->pagelist;
1309 const char *name;
1310 size_t name_len;
1311 int err;
1312
1313 err = security_dentry_init_security(dentry, mode, &dentry->d_name,
1314 &as_ctx->sec_ctx,
1315 &as_ctx->sec_ctxlen);
1316 if (err < 0) {
1317 WARN_ON_ONCE(err != -EOPNOTSUPP);
1318 err = 0; /* do nothing */
1319 goto out;
1320 }
1321
1322 err = -ENOMEM;
1323 if (!pagelist) {
1324 pagelist = ceph_pagelist_alloc(GFP_KERNEL);
1325 if (!pagelist)
1326 goto out;
1327 err = ceph_pagelist_reserve(pagelist, PAGE_SIZE);
1328 if (err)
1329 goto out;
1330 ceph_pagelist_encode_32(pagelist, 1);
1331 }
1332
1333 /*
1334 * FIXME: Make security_dentry_init_security() generic. Currently it
1335 * only supports a single security module, and only SELinux has a
1336 * dentry_init_security hook.
1337 */
1338 name = XATTR_NAME_SELINUX;
1339 name_len = strlen(name);
1340 err = ceph_pagelist_reserve(pagelist,
1341 4 * 2 + name_len + as_ctx->sec_ctxlen);
1342 if (err)
1343 goto out;
1344
1345 if (as_ctx->pagelist) {
1346 /* update count of KV pairs */
1347 BUG_ON(pagelist->length <= sizeof(__le32));
1348 if (list_is_singular(&pagelist->head)) {
1349 le32_add_cpu((__le32*)pagelist->mapped_tail, 1);
1350 } else {
1351 struct page *page = list_first_entry(&pagelist->head,
1352 struct page, lru);
1353 void *addr = kmap_atomic(page);
1354 le32_add_cpu((__le32*)addr, 1);
1355 kunmap_atomic(addr);
1356 }
1357 } else {
1358 as_ctx->pagelist = pagelist;
1359 }
1360
1361 ceph_pagelist_encode_32(pagelist, name_len);
1362 ceph_pagelist_append(pagelist, name, name_len);
1363
1364 ceph_pagelist_encode_32(pagelist, as_ctx->sec_ctxlen);
1365 ceph_pagelist_append(pagelist, as_ctx->sec_ctx, as_ctx->sec_ctxlen);
1366
1367 err = 0;
1368 out:
1369 if (pagelist && !as_ctx->pagelist)
1370 ceph_pagelist_release(pagelist);
1371 return err;
1372 }
1373 #endif /* CONFIG_CEPH_FS_SECURITY_LABEL */
1374 #endif /* CONFIG_SECURITY */
1375
1376 void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx)
1377 {
1378 #ifdef CONFIG_CEPH_FS_SECURITY_LABEL
1379 struct lsmcontext scaff; /* scaffolding */
1380 #endif
1381 #ifdef CONFIG_CEPH_FS_POSIX_ACL
1382 posix_acl_release(as_ctx->acl);
1383 posix_acl_release(as_ctx->default_acl);
1384 #endif
1385 #ifdef CONFIG_CEPH_FS_SECURITY_LABEL
1386 lsmcontext_init(&scaff, as_ctx->sec_ctx, as_ctx->sec_ctxlen, 0);
1387 security_release_secctx(&scaff);
1388 #endif
1389 if (as_ctx->pagelist)
1390 ceph_pagelist_release(as_ctx->pagelist);
1391 }
1392
1393 /*
1394 * List of handlers for synthetic system.* attributes. Other
1395 * attributes are handled directly.
1396 */
1397 const struct xattr_handler *ceph_xattr_handlers[] = {
1398 #ifdef CONFIG_CEPH_FS_POSIX_ACL
1399 &posix_acl_access_xattr_handler,
1400 &posix_acl_default_xattr_handler,
1401 #endif
1402 &ceph_other_xattr_handler,
1403 NULL,
1404 };