/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/gfs2_ondisk.h>
#include <linux/genhd.h>

#include "gfs2.h"
#include "incore.h"
#include "sys.h"
#include "super.h"
#include "glock.h"
#include "quota.h"
#include "util.h"
#include "glops.h"
#include "recovery.h"

struct gfs2_attr {
	struct attribute attr;
	ssize_t (*show)(struct gfs2_sbd *, char *);
	ssize_t (*store)(struct gfs2_sbd *, const char *, size_t);
};

static ssize_t gfs2_attr_show(struct kobject *kobj, struct attribute *attr,
			      char *buf)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
	struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);
	return a->show ? a->show(sdp, buf) : 0;
}

static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t len)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
	struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);
	return a->store ? a->store(sdp, buf, len) : len;
}

static const struct sysfs_ops gfs2_attr_ops = {
	.show = gfs2_attr_show,
	.store = gfs2_attr_store,
};
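
/*
 * Note (illustrative, not part of the original file): the two wrappers
 * above dispatch every sysfs read/write on the per-mount kobject to the
 * ->show()/->store() callbacks held in struct gfs2_attr.  The attributes
 * defined in this file end up under /sys/fs/gfs2/<locktable>/, so a read
 * of, e.g., /sys/fs/gfs2/mycluster:myfs/fsname goes through
 * gfs2_attr_show() -> fsname_show().  The "mycluster:myfs" lock table
 * name is hypothetical and used only as an example.
 */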


static struct kset *gfs2_kset;

static ssize_t id_show(struct gfs2_sbd *sdp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u:%u\n",
			MAJOR(sdp->sd_vfs->s_dev), MINOR(sdp->sd_vfs->s_dev));
}

static ssize_t fsname_show(struct gfs2_sbd *sdp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_fsname);
}

static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
{
	struct super_block *s = sdp->sd_vfs;

	buf[0] = '\0';
	if (uuid_is_null(&s->s_uuid))
		return 0;
	return snprintf(buf, PAGE_SIZE, "%pUB\n", &s->s_uuid);
}

static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
{
	struct super_block *sb = sdp->sd_vfs;
	int frozen = (sb->s_writers.frozen == SB_UNFROZEN) ? 0 : 1;

	return snprintf(buf, PAGE_SIZE, "%d\n", frozen);
}

static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	int error, n;

	error = kstrtoint(buf, 0, &n);
	if (error)
		return error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (n) {
	case 0:
		error = thaw_super(sdp->sd_vfs);
		break;
	case 1:
		error = freeze_super(sdp->sd_vfs);
		break;
	default:
		return -EINVAL;
	}

	if (error) {
		fs_warn(sdp, "freeze %d error %d\n", n, error);
		return error;
	}

	return len;
}
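
/*
 * Illustrative usage (an assumption about typical administration, not
 * part of the original file): the filesystem can be frozen and thawed
 * from userspace with, for example,
 *
 *   echo 1 > /sys/fs/gfs2/<locktable>/freeze    (freeze)
 *   echo 0 > /sys/fs/gfs2/<locktable>/freeze    (thaw)
 *
 * where <locktable> is the kobject name taken from sd_table_name.
 */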

static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf)
{
	unsigned int b = test_bit(SDF_SHUTDOWN, &sdp->sd_flags);
	return snprintf(buf, PAGE_SIZE, "%u\n", b);
}

static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	int error, val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = kstrtoint(buf, 0, &val);
	if (error)
		return error;

	if (val != 1)
		return -EINVAL;

	gfs2_lm_withdraw(sdp, "withdrawing from cluster at user's request\n");

	return len;
}
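
/*
 * Note (illustrative): only the value "1" is accepted here; writing it
 * forces this node to withdraw from the filesystem, and the withdrawn
 * state is then reported by withdraw_show() via the SDF_SHUTDOWN flag.
 */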

static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
				 size_t len)
{
	int error, val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = kstrtoint(buf, 0, &val);
	if (error)
		return error;

	if (val != 1)
		return -EINVAL;

	gfs2_statfs_sync(sdp->sd_vfs, 0);
	return len;
}

static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
				size_t len)
{
	int error, val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = kstrtoint(buf, 0, &val);
	if (error)
		return error;

	if (val != 1)
		return -EINVAL;

	gfs2_quota_sync(sdp->sd_vfs, 0);
	return len;
}

static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
					size_t len)
{
	struct kqid qid;
	int error;
	u32 id;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = kstrtou32(buf, 0, &id);
	if (error)
		return error;

	qid = make_kqid(current_user_ns(), USRQUOTA, id);
	if (!qid_valid(qid))
		return -EINVAL;

	error = gfs2_quota_refresh(sdp, qid);
	return error ? error : len;
}

static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
					 size_t len)
{
	struct kqid qid;
	int error;
	u32 id;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = kstrtou32(buf, 0, &id);
	if (error)
		return error;

	qid = make_kqid(current_user_ns(), GRPQUOTA, id);
	if (!qid_valid(qid))
		return -EINVAL;

	error = gfs2_quota_refresh(sdp, qid);
	return error ? error : len;
}
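
/*
 * Illustrative usage (an assumption, not part of the original file):
 * writing a numeric uid/gid re-reads that one quota entry via
 * gfs2_quota_refresh(), while writing "1" to quota_sync or statfs_sync
 * forces a sync, e.g.
 *
 *   echo 1000 > /sys/fs/gfs2/<locktable>/quota_refresh_user
 *   echo 1    > /sys/fs/gfs2/<locktable>/quota_sync
 */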

static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	struct gfs2_glock *gl;
	const struct gfs2_glock_operations *glops;
	unsigned int glmode;
	unsigned int gltype;
	unsigned long long glnum;
	char mode[16];
	int rv;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	rv = sscanf(buf, "%u:%llu %15s", &gltype, &glnum,
		    mode);
	if (rv != 3)
		return -EINVAL;

	if (strcmp(mode, "EX") == 0)
		glmode = LM_ST_UNLOCKED;
	else if ((strcmp(mode, "CW") == 0) || (strcmp(mode, "DF") == 0))
		glmode = LM_ST_DEFERRED;
	else if ((strcmp(mode, "PR") == 0) || (strcmp(mode, "SH") == 0))
		glmode = LM_ST_SHARED;
	else
		return -EINVAL;

	if (gltype > LM_TYPE_JOURNAL)
		return -EINVAL;
	if (gltype == LM_TYPE_NONDISK && glnum == GFS2_FREEZE_LOCK)
		glops = &gfs2_freeze_glops;
	else
		glops = gfs2_glops_list[gltype];
	if (glops == NULL)
		return -EINVAL;
	if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags))
		fs_info(sdp, "demote interface used\n");
	rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl);
	if (rv)
		return rv;
	gfs2_glock_cb(gl, glmode);
	gfs2_glock_put(gl);
	return len;
}
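
/*
 * Illustrative usage (an assumption, not part of the original file): the
 * write is parsed as "<gltype>:<glnum> <mode>", so for example
 *
 *   echo "2:24 EX" > /sys/fs/gfs2/<locktable>/demote_rq
 *
 * requests a demote for glock type 2, number 24; the numbers are made up
 * for the example.  Per the mapping above, "EX" requests a demote to
 * LM_ST_UNLOCKED, "CW"/"DF" to LM_ST_DEFERRED, and "PR"/"SH" to
 * LM_ST_SHARED.
 */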


#define GFS2_ATTR(name, mode, show, store) \
static struct gfs2_attr gfs2_attr_##name = __ATTR(name, mode, show, store)

GFS2_ATTR(id, 0444, id_show, NULL);
GFS2_ATTR(fsname, 0444, fsname_show, NULL);
GFS2_ATTR(uuid, 0444, uuid_show, NULL);
GFS2_ATTR(freeze, 0644, freeze_show, freeze_store);
GFS2_ATTR(withdraw, 0644, withdraw_show, withdraw_store);
GFS2_ATTR(statfs_sync, 0200, NULL, statfs_sync_store);
GFS2_ATTR(quota_sync, 0200, NULL, quota_sync_store);
GFS2_ATTR(quota_refresh_user, 0200, NULL, quota_refresh_user_store);
GFS2_ATTR(quota_refresh_group, 0200, NULL, quota_refresh_group_store);
GFS2_ATTR(demote_rq, 0200, NULL, demote_rq_store);

static struct attribute *gfs2_attrs[] = {
	&gfs2_attr_id.attr,
	&gfs2_attr_fsname.attr,
	&gfs2_attr_uuid.attr,
	&gfs2_attr_freeze.attr,
	&gfs2_attr_withdraw.attr,
	&gfs2_attr_statfs_sync.attr,
	&gfs2_attr_quota_sync.attr,
	&gfs2_attr_quota_refresh_user.attr,
	&gfs2_attr_quota_refresh_group.attr,
	&gfs2_attr_demote_rq.attr,
	NULL,
};

static void gfs2_sbd_release(struct kobject *kobj)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);

	kfree(sdp);
}

static struct kobj_type gfs2_ktype = {
	.release = gfs2_sbd_release,
	.default_attrs = gfs2_attrs,
	.sysfs_ops = &gfs2_attr_ops,
};
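
/*
 * Note (illustrative, not part of the original file): with gfs2_attrs
 * wired up as default_attrs, the attribute files above are created
 * automatically when sd_kobj is added in gfs2_sys_fs_add(), and
 * gfs2_sbd_release() frees the superblock data once the last reference
 * to that kobject is dropped.
 */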


/*
 * lock_module. Originally from lock_dlm
 */

static ssize_t proto_name_show(struct gfs2_sbd *sdp, char *buf)
{
	const struct lm_lockops *ops = sdp->sd_lockstruct.ls_ops;
	return sprintf(buf, "%s\n", ops->lm_proto_name);
}

static ssize_t block_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	ssize_t ret;
	int val = 0;

	if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))
		val = 1;
	ret = sprintf(buf, "%d\n", val);
	return ret;
}

static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int ret, val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val == 1)
		set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	else if (val == 0) {
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		smp_mb__after_atomic();
		gfs2_glock_thaw(sdp);
	} else {
		return -EINVAL;
	}
	return len;
}
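
/*
 * Note (an assumption about intended use, not part of the original file):
 * the "block" attribute is normally driven by the cluster recovery
 * tooling rather than by hand; writing 1 sets DFL_BLOCK_LOCKS so glock
 * activity is held off during recovery, and writing 0 clears the flag
 * and thaws the glocks again via gfs2_glock_thaw().
 */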

static ssize_t wdack_show(struct gfs2_sbd *sdp, char *buf)
{
	int val = completion_done(&sdp->sd_wdack) ? 1 : 0;

	return sprintf(buf, "%d\n", val);
}

static ssize_t wdack_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	int ret, val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if ((val == 1) &&
	    !strcmp(sdp->sd_lockstruct.ls_ops->lm_proto_name, "lock_dlm"))
		complete(&sdp->sd_wdack);
	else
		return -EINVAL;
	return len;
}

static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", ls->ls_first);
}

static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	unsigned first;
	int rv;

	rv = sscanf(buf, "%u", &first);
	if (rv != 1 || first > 1)
		return -EINVAL;
	rv = wait_for_completion_killable(&sdp->sd_locking_init);
	if (rv)
		return rv;
	spin_lock(&sdp->sd_jindex_spin);
	rv = -EBUSY;
	if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
		goto out;
	rv = -EINVAL;
	if (sdp->sd_args.ar_spectator)
		goto out;
	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
		goto out;
	sdp->sd_lockstruct.ls_first = first;
	rv = 0;
out:
	spin_unlock(&sdp->sd_jindex_spin);
	return rv ? rv : len;
}

static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", !!test_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags));
}

int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid)
{
	struct gfs2_jdesc *jd;
	int rv;

	/* Wait for our primary journal to be initialized */
	wait_for_completion(&sdp->sd_journal_ready);

	spin_lock(&sdp->sd_jindex_spin);
	rv = -EBUSY;
	/*
	 * If we're a spectator, we use journal0, but it's not really ours.
	 * So we need to wait for its recovery too. If we skip it we'd never
	 * queue work to the recovery workqueue, and so its completion would
	 * never clear the DFL_BLOCK_LOCKS flag, so all our locks would
	 * permanently stop working.
	 */
	if (sdp->sd_jdesc->jd_jid == jid && !sdp->sd_args.ar_spectator)
		goto out;
	rv = -ENOENT;
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (jd->jd_jid != jid && !sdp->sd_args.ar_spectator)
			continue;
		rv = gfs2_recover_journal(jd, false);
		break;
	}
out:
	spin_unlock(&sdp->sd_jindex_spin);
	return rv;
}

static ssize_t recover_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	unsigned jid;
	int rv;

	rv = sscanf(buf, "%u", &jid);
	if (rv != 1)
		return -EINVAL;

	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
		rv = -ESHUTDOWN;
		goto out;
	}

	rv = gfs2_recover_set(sdp, jid);
out:
	return rv ? rv : len;
}

static ssize_t recover_done_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", ls->ls_recover_jid_done);
}

static ssize_t recover_status_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", ls->ls_recover_jid_status);
}

static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf)
{
	return sprintf(buf, "%d\n", sdp->sd_lockstruct.ls_jid);
}

static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	int jid;
	int rv;

	rv = sscanf(buf, "%d", &jid);
	if (rv != 1)
		return -EINVAL;
	rv = wait_for_completion_killable(&sdp->sd_locking_init);
	if (rv)
		return rv;
	spin_lock(&sdp->sd_jindex_spin);
	rv = -EINVAL;
	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
		goto out;
	rv = -EBUSY;
	if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
		goto out;
	rv = 0;
	if (sdp->sd_args.ar_spectator && jid > 0)
		rv = jid = -EINVAL;
	sdp->sd_lockstruct.ls_jid = jid;
	clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	smp_mb__after_atomic();
	wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
out:
	spin_unlock(&sdp->sd_jindex_spin);
	return rv ? rv : len;
}
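
/*
 * Note (an assumption about typical use, not part of the original file):
 * during a clustered mount the userspace mount helper writes the journal
 * id to lock_module/jid and, on the first mounter, "1" to
 * lock_module/first; the mount can then proceed once SDF_NOJOURNALID has
 * been cleared and the waiters on that bit have been woken above.
 */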

#define GDLM_ATTR(_name,_mode,_show,_store) \
static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)

GDLM_ATTR(proto_name, 0444, proto_name_show, NULL);
GDLM_ATTR(block, 0644, block_show, block_store);
GDLM_ATTR(withdraw, 0644, wdack_show, wdack_store);
GDLM_ATTR(jid, 0644, jid_show, jid_store);
GDLM_ATTR(first, 0644, lkfirst_show, lkfirst_store);
GDLM_ATTR(first_done, 0444, first_done_show, NULL);
GDLM_ATTR(recover, 0600, NULL, recover_store);
GDLM_ATTR(recover_done, 0444, recover_done_show, NULL);
GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);

static struct attribute *lock_module_attrs[] = {
	&gdlm_attr_proto_name.attr,
	&gdlm_attr_block.attr,
	&gdlm_attr_withdraw.attr,
	&gdlm_attr_jid.attr,
	&gdlm_attr_first.attr,
	&gdlm_attr_first_done.attr,
	&gdlm_attr_recover.attr,
	&gdlm_attr_recover_done.attr,
	&gdlm_attr_recover_status.attr,
	NULL,
};

/*
 * get and set struct gfs2_tune fields
 */

static ssize_t quota_scale_show(struct gfs2_sbd *sdp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u %u\n",
			sdp->sd_tune.gt_quota_scale_num,
			sdp->sd_tune.gt_quota_scale_den);
}

static ssize_t quota_scale_store(struct gfs2_sbd *sdp, const char *buf,
				 size_t len)
{
	struct gfs2_tune *gt = &sdp->sd_tune;
	unsigned int x, y;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (sscanf(buf, "%u %u", &x, &y) != 2 || !y)
		return -EINVAL;

	spin_lock(&gt->gt_spin);
	gt->gt_quota_scale_num = x;
	gt->gt_quota_scale_den = y;
	spin_unlock(&gt->gt_spin);
	return len;
}

static ssize_t tune_set(struct gfs2_sbd *sdp, unsigned int *field,
			int check_zero, const char *buf, size_t len)
{
	struct gfs2_tune *gt = &sdp->sd_tune;
	unsigned int x;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = kstrtouint(buf, 0, &x);
	if (error)
		return error;

	if (check_zero && !x)
		return -EINVAL;

	spin_lock(&gt->gt_spin);
	*field = x;
	spin_unlock(&gt->gt_spin);
	return len;
}

#define TUNE_ATTR_3(name, show, store) \
static struct gfs2_attr tune_attr_##name = __ATTR(name, 0644, show, store)

#define TUNE_ATTR_2(name, store) \
static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \
{ \
	return snprintf(buf, PAGE_SIZE, "%u\n", sdp->sd_tune.gt_##name); \
} \
TUNE_ATTR_3(name, name##_show, store)

#define TUNE_ATTR(name, check_zero) \
static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
{ \
	return tune_set(sdp, &sdp->sd_tune.gt_##name, check_zero, buf, len); \
} \
TUNE_ATTR_2(name, name##_store)

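/*
 * Illustrative expansion (a sketch, not part of the original file):
 * TUNE_ATTR(quota_quantum, 0) expands to roughly
 *
 *   static ssize_t quota_quantum_store(struct gfs2_sbd *sdp,
 *                                      const char *buf, size_t len)
 *   {
 *           return tune_set(sdp, &sdp->sd_tune.gt_quota_quantum, 0,
 *                           buf, len);
 *   }
 *   static ssize_t quota_quantum_show(struct gfs2_sbd *sdp, char *buf)
 *   {
 *           return snprintf(buf, PAGE_SIZE, "%u\n",
 *                           sdp->sd_tune.gt_quota_quantum);
 *   }
 *   static struct gfs2_attr tune_attr_quota_quantum =
 *           __ATTR(quota_quantum, 0644, quota_quantum_show,
 *                  quota_quantum_store);
 *
 * i.e. each TUNE_ATTR() line below creates a show/store pair plus the
 * attribute that appears under the "tune" sysfs group.
 */
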
TUNE_ATTR(quota_warn_period, 0);
TUNE_ATTR(quota_quantum, 0);
TUNE_ATTR(max_readahead, 0);
TUNE_ATTR(complain_secs, 0);
TUNE_ATTR(statfs_slow, 0);
TUNE_ATTR(new_files_jdata, 0);
TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);

static struct attribute *tune_attrs[] = {
	&tune_attr_quota_warn_period.attr,
	&tune_attr_quota_quantum.attr,
	&tune_attr_max_readahead.attr,
	&tune_attr_complain_secs.attr,
	&tune_attr_statfs_slow.attr,
	&tune_attr_statfs_quantum.attr,
	&tune_attr_quota_scale.attr,
	&tune_attr_new_files_jdata.attr,
	NULL,
};

static const struct attribute_group tune_group = {
	.name = "tune",
	.attrs = tune_attrs,
};

static const struct attribute_group lock_module_group = {
	.name = "lock_module",
	.attrs = lock_module_attrs,
};

int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
{
	struct super_block *sb = sdp->sd_vfs;
	int error;
	char ro[20];
	char spectator[20];
	char *envp[] = { ro, spectator, NULL };
	int sysfs_frees_sdp = 0;

	sprintf(ro, "RDONLY=%d", sb_rdonly(sb));
	sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);

	sdp->sd_kobj.kset = gfs2_kset;
	error = kobject_init_and_add(&sdp->sd_kobj, &gfs2_ktype, NULL,
				     "%s", sdp->sd_table_name);
	if (error)
		goto fail_reg;

	sysfs_frees_sdp = 1; /* Freeing sdp is now done by sysfs calling
				function gfs2_sbd_release. */
	error = sysfs_create_group(&sdp->sd_kobj, &tune_group);
	if (error)
		goto fail_reg;

	error = sysfs_create_group(&sdp->sd_kobj, &lock_module_group);
	if (error)
		goto fail_tune;

	error = sysfs_create_link(&sdp->sd_kobj,
				  &disk_to_dev(sb->s_bdev->bd_disk)->kobj,
				  "device");
	if (error)
		goto fail_lock_module;

	kobject_uevent_env(&sdp->sd_kobj, KOBJ_ADD, envp);
	return 0;

fail_lock_module:
	sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
fail_tune:
	sysfs_remove_group(&sdp->sd_kobj, &tune_group);
fail_reg:
	free_percpu(sdp->sd_lkstats);
	fs_err(sdp, "error %d adding sysfs files\n", error);
	if (sysfs_frees_sdp)
		kobject_put(&sdp->sd_kobj);
	else
		kfree(sdp);
	sb->s_fs_info = NULL;
	return error;
}
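
/*
 * Note (illustrative, not part of the original file): after a successful
 * gfs2_sys_fs_add() the mount is visible under sysfs roughly as
 *
 *   /sys/fs/gfs2/<locktable>/              id, fsname, uuid, freeze, ...
 *   /sys/fs/gfs2/<locktable>/tune/         quota_quantum, max_readahead, ...
 *   /sys/fs/gfs2/<locktable>/lock_module/  jid, first, recover, ...
 *   /sys/fs/gfs2/<locktable>/device        symlink to the backing disk
 *
 * and a KOBJ_ADD uevent carrying the RDONLY= and SPECTATOR= variables is
 * emitted.
 */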

void gfs2_sys_fs_del(struct gfs2_sbd *sdp)
{
	sysfs_remove_link(&sdp->sd_kobj, "device");
	sysfs_remove_group(&sdp->sd_kobj, &tune_group);
	sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
	kobject_put(&sdp->sd_kobj);
}

static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
		       struct kobj_uevent_env *env)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
	struct super_block *s = sdp->sd_vfs;

	add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
	add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
	if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags))
		add_uevent_var(env, "JOURNALID=%d", sdp->sd_lockstruct.ls_jid);
	if (!uuid_is_null(&s->s_uuid))
		add_uevent_var(env, "UUID=%pUB", &s->s_uuid);
	return 0;
}

static const struct kset_uevent_ops gfs2_uevent_ops = {
	.uevent = gfs2_uevent,
};

int gfs2_sys_init(void)
{
	gfs2_kset = kset_create_and_add("gfs2", &gfs2_uevent_ops, fs_kobj);
	if (!gfs2_kset)
		return -ENOMEM;
	return 0;
}

void gfs2_sys_uninit(void)
{
	kset_unregister(gfs2_kset);
}