arch/x86/kernel/cpu/intel_rdt_rdtgroup.c (as of "x86/intel_rdt: Reset per cpu closids on unmount")

/*
 * User interface for Resource Allocation in Resource Director Technology (RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual.
 */

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/task_work.h>

#include <uapi/linux/magic.h>

#include <asm/intel_rdt.h>
#include <asm/intel_rdt_common.h>

DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
struct kernfs_root *rdt_root;
struct rdtgroup rdtgroup_default;
LIST_HEAD(rdt_all_groups);

/* Kernel fs node for "info" directory under root */
static struct kernfs_node *kn_info;

/*
 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
 * we can keep a bitmap of free CLOSIDs in a single integer.
 *
 * Using a global CLOSID across all resources has some advantages and
 * some drawbacks:
 * + We can simply set "current->closid" to assign a task to a resource
 *   group.
 * + Context switch code can avoid extra memory references deciding which
 *   CLOSID to load into the PQR_ASSOC MSR
 * - We give up some options in configuring resource groups across multi-socket
 *   systems.
 * - Our choices on how to configure each resource become progressively more
 *   limited as the number of resources grows.
 */
static int closid_free_map;

static void closid_init(void)
{
        struct rdt_resource *r;
        int rdt_min_closid = 32;

        /* Compute rdt_min_closid across all resources */
        for_each_enabled_rdt_resource(r)
                rdt_min_closid = min(rdt_min_closid, r->num_closid);

        closid_free_map = BIT_MASK(rdt_min_closid) - 1;

        /* CLOSID 0 is always reserved for the default group */
        closid_free_map &= ~1;
}

int closid_alloc(void)
{
        int closid = ffs(closid_free_map);

        if (closid == 0)
                return -ENOSPC;
        closid--;
        closid_free_map &= ~(1 << closid);

        return closid;
}

static void closid_free(int closid)
{
        closid_free_map |= 1 << closid;
}

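The three helpers above are the entire CLOSID allocator: one integer bitmap in which bit N set means CLOSID N is free, with bit 0 permanently cleared because CLOSID 0 belongs to the default group. As a minimal illustration of the same ffs()-based scheme, here is a hypothetical user-space sketch; demo_init(), demo_alloc() and demo_free() are invented names for this example and are not part of the kernel code.

/* Hypothetical user-space sketch of the same ffs()-based bitmap allocator. */
#include <stdio.h>
#include <strings.h>    /* ffs() */

static int demo_free_map;

static void demo_init(int num_closid)
{
        /* Bits 0..num_closid-1 start out free ... */
        demo_free_map = (1 << num_closid) - 1;
        /* ... except CLOSID 0, which stays reserved for the default group. */
        demo_free_map &= ~1;
}

static int demo_alloc(void)
{
        int closid = ffs(demo_free_map);        /* 1-based index of lowest set bit */

        if (closid == 0)
                return -1;                      /* no free CLOSID left */
        closid--;
        demo_free_map &= ~(1 << closid);        /* mark it busy */
        return closid;
}

static void demo_free(int closid)
{
        demo_free_map |= 1 << closid;           /* mark it free again */
}

int main(void)
{
        demo_init(4);                           /* pretend the hardware has 4 CLOSIDs */
        printf("%d\n", demo_alloc());           /* 1 */
        printf("%d\n", demo_alloc());           /* 2 */
        demo_free(1);
        printf("%d\n", demo_alloc());           /* 1 again: lowest free bit wins */
        return 0;
}
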
/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
        struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
                                .ia_uid = current_fsuid(),
                                .ia_gid = current_fsgid(), };

        if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
            gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
                return 0;

        return kernfs_setattr(kn, &iattr);
}

static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
{
        struct kernfs_node *kn;
        int ret;

        kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
                                  0, rft->kf_ops, rft, NULL, NULL);
        if (IS_ERR(kn))
                return PTR_ERR(kn);

        ret = rdtgroup_kn_set_ugid(kn);
        if (ret) {
                kernfs_remove(kn);
                return ret;
        }

        return 0;
}

static int rdtgroup_add_files(struct kernfs_node *kn, struct rftype *rfts,
                              int len)
{
        struct rftype *rft;
        int ret;

        lockdep_assert_held(&rdtgroup_mutex);

        for (rft = rfts; rft < rfts + len; rft++) {
                ret = rdtgroup_add_file(kn, rft);
                if (ret)
                        goto error;
        }

        return 0;
error:
        pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
        while (--rft >= rfts)
                kernfs_remove_by_name(kn, rft->name);
        return ret;
}

static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
{
        struct kernfs_open_file *of = m->private;
        struct rftype *rft = of->kn->priv;

        if (rft->seq_show)
                return rft->seq_show(of, m, arg);
        return 0;
}

static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
                                   size_t nbytes, loff_t off)
{
        struct rftype *rft = of->kn->priv;

        if (rft->write)
                return rft->write(of, buf, nbytes, off);

        return -EINVAL;
}

static struct kernfs_ops rdtgroup_kf_single_ops = {
        .atomic_write_len       = PAGE_SIZE,
        .write                  = rdtgroup_file_write,
        .seq_show               = rdtgroup_seqfile_show,
};

static int rdtgroup_cpus_show(struct kernfs_open_file *of,
                              struct seq_file *s, void *v)
{
        struct rdtgroup *rdtgrp;
        int ret = 0;

        rdtgrp = rdtgroup_kn_lock_live(of->kn);

        if (rdtgrp)
                seq_printf(s, "%*pb\n", cpumask_pr_args(&rdtgrp->cpu_mask));
        else
                ret = -ENOENT;
        rdtgroup_kn_unlock(of->kn);

        return ret;
}

static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
                                   char *buf, size_t nbytes, loff_t off)
{
        cpumask_var_t tmpmask, newmask;
        struct rdtgroup *rdtgrp, *r;
        int ret, cpu;

        if (!buf)
                return -EINVAL;

        if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
                return -ENOMEM;
        if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
                free_cpumask_var(tmpmask);
                return -ENOMEM;
        }

        rdtgrp = rdtgroup_kn_lock_live(of->kn);
        if (!rdtgrp) {
                ret = -ENOENT;
                goto unlock;
        }

        ret = cpumask_parse(buf, newmask);
        if (ret)
                goto unlock;

        /* check that user didn't specify any offline cpus */
        cpumask_andnot(tmpmask, newmask, cpu_online_mask);
        if (cpumask_weight(tmpmask)) {
                ret = -EINVAL;
                goto unlock;
        }

        /* Check whether cpus are dropped from this group */
        cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
        if (cpumask_weight(tmpmask)) {
                /* Can't drop from default group */
                if (rdtgrp == &rdtgroup_default) {
                        ret = -EINVAL;
                        goto unlock;
                }
                /* Give any dropped cpus to rdtgroup_default */
                cpumask_or(&rdtgroup_default.cpu_mask,
                           &rdtgroup_default.cpu_mask, tmpmask);
                for_each_cpu(cpu, tmpmask)
                        per_cpu(cpu_closid, cpu) = 0;
        }

        /*
         * If we added cpus, remove them from previous group that owned them
         * and update per-cpu closid
         */
        cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
        if (cpumask_weight(tmpmask)) {
                list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
                        if (r == rdtgrp)
                                continue;
                        cpumask_andnot(&r->cpu_mask, &r->cpu_mask, tmpmask);
                }
                for_each_cpu(cpu, tmpmask)
                        per_cpu(cpu_closid, cpu) = rdtgrp->closid;
        }

        /* Done pushing/pulling - update this group with new mask */
        cpumask_copy(&rdtgrp->cpu_mask, newmask);

unlock:
        rdtgroup_kn_unlock(of->kn);
        free_cpumask_var(tmpmask);
        free_cpumask_var(newmask);

        return ret ?: nbytes;
}

struct task_move_callback {
        struct callback_head    work;
        struct rdtgroup         *rdtgrp;
};

static void move_myself(struct callback_head *head)
{
        struct task_move_callback *callback;
        struct rdtgroup *rdtgrp;

        callback = container_of(head, struct task_move_callback, work);
        rdtgrp = callback->rdtgrp;

        /*
         * If resource group was deleted before this task work callback
         * was invoked, then assign the task to root group and free the
         * resource group.
         */
        if (atomic_dec_and_test(&rdtgrp->waitcount) &&
            (rdtgrp->flags & RDT_DELETED)) {
                current->closid = 0;
                kfree(rdtgrp);
        }

        /* update PQR_ASSOC MSR to make resource group go into effect */
        intel_rdt_sched_in();

        kfree(callback);
}

static int __rdtgroup_move_task(struct task_struct *tsk,
                                struct rdtgroup *rdtgrp)
{
        struct task_move_callback *callback;
        int ret;

        callback = kzalloc(sizeof(*callback), GFP_KERNEL);
        if (!callback)
                return -ENOMEM;
        callback->work.func = move_myself;
        callback->rdtgrp = rdtgrp;

        /*
         * Take a refcount, so rdtgrp cannot be freed before the
         * callback has been invoked.
         */
        atomic_inc(&rdtgrp->waitcount);
        ret = task_work_add(tsk, &callback->work, true);
        if (ret) {
                /*
                 * Task is exiting. Drop the refcount and free the callback.
                 * No need to check the refcount as the group cannot be
                 * deleted before the write function unlocks rdtgroup_mutex.
                 */
                atomic_dec(&rdtgrp->waitcount);
                kfree(callback);
        } else {
                tsk->closid = rdtgrp->closid;
        }
        return ret;
}

static int rdtgroup_task_write_permission(struct task_struct *task,
                                          struct kernfs_open_file *of)
{
        const struct cred *tcred = get_task_cred(task);
        const struct cred *cred = current_cred();
        int ret = 0;

        /*
         * Even if we're attaching all tasks in the thread group, we only
         * need to check permissions on one of them.
         */
        if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
            !uid_eq(cred->euid, tcred->uid) &&
            !uid_eq(cred->euid, tcred->suid))
                ret = -EPERM;

        put_cred(tcred);
        return ret;
}

static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
                              struct kernfs_open_file *of)
{
        struct task_struct *tsk;
        int ret;

        rcu_read_lock();
        if (pid) {
                tsk = find_task_by_vpid(pid);
                if (!tsk) {
                        rcu_read_unlock();
                        return -ESRCH;
                }
        } else {
                tsk = current;
        }

        get_task_struct(tsk);
        rcu_read_unlock();

        ret = rdtgroup_task_write_permission(tsk, of);
        if (!ret)
                ret = __rdtgroup_move_task(tsk, rdtgrp);

        put_task_struct(tsk);
        return ret;
}

static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
                                    char *buf, size_t nbytes, loff_t off)
{
        struct rdtgroup *rdtgrp;
        int ret = 0;
        pid_t pid;

        if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
                return -EINVAL;
        rdtgrp = rdtgroup_kn_lock_live(of->kn);

        if (rdtgrp)
                ret = rdtgroup_move_task(pid, rdtgrp, of);
        else
                ret = -ENOENT;

        rdtgroup_kn_unlock(of->kn);

        return ret ?: nbytes;
}

static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
{
        struct task_struct *p, *t;

        rcu_read_lock();
        for_each_process_thread(p, t) {
                if (t->closid == r->closid)
                        seq_printf(s, "%d\n", t->pid);
        }
        rcu_read_unlock();
}

static int rdtgroup_tasks_show(struct kernfs_open_file *of,
                               struct seq_file *s, void *v)
{
        struct rdtgroup *rdtgrp;
        int ret = 0;

        rdtgrp = rdtgroup_kn_lock_live(of->kn);
        if (rdtgrp)
                show_rdt_tasks(rdtgrp, s);
        else
                ret = -ENOENT;
        rdtgroup_kn_unlock(of->kn);

        return ret;
}

/* Files in each rdtgroup */
static struct rftype rdtgroup_base_files[] = {
        {
                .name           = "cpus",
                .mode           = 0644,
                .kf_ops         = &rdtgroup_kf_single_ops,
                .write          = rdtgroup_cpus_write,
                .seq_show       = rdtgroup_cpus_show,
        },
        {
                .name           = "tasks",
                .mode           = 0644,
                .kf_ops         = &rdtgroup_kf_single_ops,
                .write          = rdtgroup_tasks_write,
                .seq_show       = rdtgroup_tasks_show,
        },
        {
                .name           = "schemata",
                .mode           = 0644,
                .kf_ops         = &rdtgroup_kf_single_ops,
                .write          = rdtgroup_schemata_write,
                .seq_show       = rdtgroup_schemata_show,
        },
};

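Every group directory therefore exposes exactly three files: "cpus", "tasks" and "schemata". A hypothetical user-space sketch of how the first two are driven is shown below; it assumes resctrl is already mounted at /sys/fs/resctrl and that a group directory named "grp1" exists, both of which are assumptions of this example rather than anything guaranteed by the code above.

/*
 * Hypothetical user-space example: assign the calling process and CPUs 0-1
 * to a resource group. Assumes resctrl is mounted at /sys/fs/resctrl and
 * that the group directory "grp1" has already been created with mkdir.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);

        if (fd < 0 || write(fd, val, strlen(val)) < 0) {
                perror(path);
                if (fd >= 0)
                        close(fd);
                return -1;
        }
        close(fd);
        return 0;
}

int main(void)
{
        char pid[32];

        /* rdtgroup_cpus_write() expects a hex cpumask: "3" means CPUs 0 and 1 */
        write_str("/sys/fs/resctrl/grp1/cpus", "3\n");

        /* rdtgroup_tasks_write() expects one PID per write */
        snprintf(pid, sizeof(pid), "%d\n", getpid());
        write_str("/sys/fs/resctrl/grp1/tasks", pid);
        return 0;
}
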
static int rdt_num_closids_show(struct kernfs_open_file *of,
                                struct seq_file *seq, void *v)
{
        struct rdt_resource *r = of->kn->parent->priv;

        seq_printf(seq, "%d\n", r->num_closid);

        return 0;
}

static int rdt_cbm_mask_show(struct kernfs_open_file *of,
                             struct seq_file *seq, void *v)
{
        struct rdt_resource *r = of->kn->parent->priv;

        seq_printf(seq, "%x\n", r->max_cbm);

        return 0;
}

static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
                                 struct seq_file *seq, void *v)
{
        struct rdt_resource *r = of->kn->parent->priv;

        seq_printf(seq, "%d\n", r->min_cbm_bits);

        return 0;
}

/* rdtgroup information files for one cache resource. */
static struct rftype res_info_files[] = {
        {
                .name           = "num_closids",
                .mode           = 0444,
                .kf_ops         = &rdtgroup_kf_single_ops,
                .seq_show       = rdt_num_closids_show,
        },
        {
                .name           = "cbm_mask",
                .mode           = 0444,
                .kf_ops         = &rdtgroup_kf_single_ops,
                .seq_show       = rdt_cbm_mask_show,
        },
        {
                .name           = "min_cbm_bits",
                .mode           = 0444,
                .kf_ops         = &rdtgroup_kf_single_ops,
                .seq_show       = rdt_min_cbm_bits_show,
        },
};

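The info files above are read-only views of the resource parameters. As a small, hypothetical user-space example (assuming resctrl is mounted at /sys/fs/resctrl and the L3 resource is enabled, so an info/L3 subdirectory exists), reading num_closids looks like this:

/*
 * Hypothetical user-space example: read the CLOSID count exposed by the
 * info directory for the L3 cache resource.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/fs/resctrl/info/L3/num_closids", "r");
        int num_closids;

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fscanf(f, "%d", &num_closids) == 1)
                printf("L3 supports %d CLOSIDs\n", num_closids);
        fclose(f);
        return 0;
}
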
static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
{
        struct kernfs_node *kn_subdir;
        struct rdt_resource *r;
        int ret;

        /* create the directory */
        kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
        if (IS_ERR(kn_info))
                return PTR_ERR(kn_info);
        kernfs_get(kn_info);

        for_each_enabled_rdt_resource(r) {
                kn_subdir = kernfs_create_dir(kn_info, r->name,
                                              kn_info->mode, r);
                if (IS_ERR(kn_subdir)) {
                        ret = PTR_ERR(kn_subdir);
                        goto out_destroy;
                }
                kernfs_get(kn_subdir);
                ret = rdtgroup_kn_set_ugid(kn_subdir);
                if (ret)
                        goto out_destroy;
                ret = rdtgroup_add_files(kn_subdir, res_info_files,
                                         ARRAY_SIZE(res_info_files));
                if (ret)
                        goto out_destroy;
                kernfs_activate(kn_subdir);
        }

        /*
         * This extra ref will be put in kernfs_remove() and guarantees
         * that kn_info is always accessible.
         */
        kernfs_get(kn_info);

        ret = rdtgroup_kn_set_ugid(kn_info);
        if (ret)
                goto out_destroy;

        kernfs_activate(kn_info);

        return 0;

out_destroy:
        kernfs_remove(kn_info);
        return ret;
}

static void l3_qos_cfg_update(void *arg)
{
        bool *enable = arg;

        wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}

static int set_l3_qos_cfg(struct rdt_resource *r, bool enable)
{
        cpumask_var_t cpu_mask;
        struct rdt_domain *d;
        int cpu;

        if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
                return -ENOMEM;

        list_for_each_entry(d, &r->domains, list) {
                /* Pick one CPU from each domain instance to update MSR */
                cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
        }
        cpu = get_cpu();
        /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
        if (cpumask_test_cpu(cpu, cpu_mask))
                l3_qos_cfg_update(&enable);
        /* Update QOS_CFG MSR on all other cpus in cpu_mask. */
        smp_call_function_many(cpu_mask, l3_qos_cfg_update, &enable, 1);
        put_cpu();

        free_cpumask_var(cpu_mask);

        return 0;
}

static int cdp_enable(void)
{
        struct rdt_resource *r_l3data = &rdt_resources_all[RDT_RESOURCE_L3DATA];
        struct rdt_resource *r_l3code = &rdt_resources_all[RDT_RESOURCE_L3CODE];
        struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
        int ret;

        if (!r_l3->capable || !r_l3data->capable || !r_l3code->capable)
                return -EINVAL;

        ret = set_l3_qos_cfg(r_l3, true);
        if (!ret) {
                r_l3->enabled = false;
                r_l3data->enabled = true;
                r_l3code->enabled = true;
        }
        return ret;
}

static void cdp_disable(void)
{
        struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];

        r->enabled = r->capable;

        if (rdt_resources_all[RDT_RESOURCE_L3DATA].enabled) {
                rdt_resources_all[RDT_RESOURCE_L3DATA].enabled = false;
                rdt_resources_all[RDT_RESOURCE_L3CODE].enabled = false;
                set_l3_qos_cfg(r, false);
        }
}

static int parse_rdtgroupfs_options(char *data)
{
        char *token, *o = data;
        int ret = 0;

        while ((token = strsep(&o, ",")) != NULL) {
                if (!*token)
                        return -EINVAL;

                if (!strcmp(token, "cdp"))
                        ret = cdp_enable();
        }

        return ret;
}

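parse_rdtgroupfs_options() recognizes a single token, "cdp", and calls cdp_enable() for it. A hypothetical user-space sketch of the corresponding mount call is shown below; the mount point path is an assumption of the example, any empty directory would do.

/*
 * Hypothetical user-space example: mount resctrl with the "cdp" option so
 * parse_rdtgroupfs_options() calls cdp_enable().
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* equivalent to: mount -t resctrl -o cdp resctrl /sys/fs/resctrl */
        if (mount("resctrl", "/sys/fs/resctrl", "resctrl", 0, "cdp")) {
                perror("mount");
                return 1;
        }
        return 0;
}
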
/*
 * We don't allow rdtgroup directories to be created anywhere
 * except the root directory. Thus, when looking for the rdtgroup
 * structure for a kernfs node, we are either looking at a directory,
 * in which case the rdtgroup structure is pointed at by the "priv"
 * field, or at a file, in which case we need only look at the parent
 * directory to find the rdtgroup.
 */
static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
{
        if (kernfs_type(kn) == KERNFS_DIR) {
                /*
                 * All the resource directories use "kn->priv"
                 * to point to the "struct rdtgroup" for the
                 * resource. "info" and its subdirectories don't
                 * have rdtgroup structures, so return NULL here.
                 */
                if (kn == kn_info || kn->parent == kn_info)
                        return NULL;
                else
                        return kn->priv;
        } else {
                return kn->parent->priv;
        }
}

struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
{
        struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

        if (!rdtgrp)
                return NULL;

        atomic_inc(&rdtgrp->waitcount);
        kernfs_break_active_protection(kn);

        mutex_lock(&rdtgroup_mutex);

        /* Was this group deleted while we waited? */
        if (rdtgrp->flags & RDT_DELETED)
                return NULL;

        return rdtgrp;
}

void rdtgroup_kn_unlock(struct kernfs_node *kn)
{
        struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

        if (!rdtgrp)
                return;

        mutex_unlock(&rdtgroup_mutex);

        if (atomic_dec_and_test(&rdtgrp->waitcount) &&
            (rdtgrp->flags & RDT_DELETED)) {
                kernfs_unbreak_active_protection(kn);
                kernfs_put(kn);
                kfree(rdtgrp);
        } else {
                kernfs_unbreak_active_protection(kn);
        }
}

static struct dentry *rdt_mount(struct file_system_type *fs_type,
                                int flags, const char *unused_dev_name,
                                void *data)
{
        struct dentry *dentry;
        int ret;

        mutex_lock(&rdtgroup_mutex);
        /*
         * resctrl file system can only be mounted once.
         */
        if (static_branch_unlikely(&rdt_enable_key)) {
                dentry = ERR_PTR(-EBUSY);
                goto out;
        }

        ret = parse_rdtgroupfs_options(data);
        if (ret) {
                dentry = ERR_PTR(ret);
                goto out_cdp;
        }

        closid_init();

        ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
        if (ret) {
                dentry = ERR_PTR(ret);
                goto out_cdp;
        }

        dentry = kernfs_mount(fs_type, flags, rdt_root,
                              RDTGROUP_SUPER_MAGIC, NULL);
        if (IS_ERR(dentry))
                goto out_cdp;

        static_branch_enable(&rdt_enable_key);
        goto out;

out_cdp:
        cdp_disable();
out:
        mutex_unlock(&rdtgroup_mutex);

        return dentry;
}

static int reset_all_cbms(struct rdt_resource *r)
{
        struct msr_param msr_param;
        cpumask_var_t cpu_mask;
        struct rdt_domain *d;
        int i, cpu;

        if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
                return -ENOMEM;

        msr_param.res = r;
        msr_param.low = 0;
        msr_param.high = r->num_closid;

        /*
         * Disable resource control for this resource by setting all
         * CBMs in all domains to the maximum mask value. Pick one CPU
         * from each domain to update the MSRs below.
         */
        list_for_each_entry(d, &r->domains, list) {
                cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);

                for (i = 0; i < r->num_closid; i++)
                        d->cbm[i] = r->max_cbm;
        }
        cpu = get_cpu();
        /* Update CBM on this cpu if it's in cpu_mask. */
        if (cpumask_test_cpu(cpu, cpu_mask))
                rdt_cbm_update(&msr_param);
        /* Update CBM on all other cpus in cpu_mask. */
        smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
        put_cpu();

        free_cpumask_var(cpu_mask);

        return 0;
}

/*
 * MSR_IA32_PQR_ASSOC is scoped per logical CPU, so all updates
 * are always in thread context.
 */
static void rdt_reset_pqr_assoc_closid(void *v)
{
        struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);

        state->closid = 0;
        wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, 0);
}

/*
 * Forcibly remove all subdirectories under root.
 */
static void rmdir_all_sub(void)
{
        struct rdtgroup *rdtgrp, *tmp;
        struct task_struct *p, *t;
        int cpu;

        /* move all tasks to default resource group */
        read_lock(&tasklist_lock);
        for_each_process_thread(p, t)
                t->closid = 0;
        read_unlock(&tasklist_lock);

        get_cpu();
        /* Reset PQR_ASSOC MSR on this cpu. */
        rdt_reset_pqr_assoc_closid(NULL);
        /* Reset PQR_ASSOC MSR on the rest of cpus. */
        smp_call_function_many(cpu_online_mask, rdt_reset_pqr_assoc_closid,
                               NULL, 1);
        put_cpu();

        list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
                /* Remove each rdtgroup other than root */
                if (rdtgrp == &rdtgroup_default)
                        continue;

                /*
                 * Give any CPUs back to the default group. We cannot copy
                 * cpu_online_mask because a CPU might have executed the
                 * offline callback already, but is still marked online.
                 */
                cpumask_or(&rdtgroup_default.cpu_mask,
                           &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

                kernfs_remove(rdtgrp->kn);
                list_del(&rdtgrp->rdtgroup_list);
                kfree(rdtgrp);
        }

        /* Reset all per cpu closids to the default value */
        for_each_cpu(cpu, &rdtgroup_default.cpu_mask)
                per_cpu(cpu_closid, cpu) = 0;

        kernfs_remove(kn_info);
}

static void rdt_kill_sb(struct super_block *sb)
{
        struct rdt_resource *r;

        mutex_lock(&rdtgroup_mutex);

        /* Put everything back to default values. */
        for_each_enabled_rdt_resource(r)
                reset_all_cbms(r);
        cdp_disable();
        rmdir_all_sub();
        static_branch_disable(&rdt_enable_key);
        kernfs_kill_sb(sb);
        mutex_unlock(&rdtgroup_mutex);
}

static struct file_system_type rdt_fs_type = {
        .name    = "resctrl",
        .mount   = rdt_mount,
        .kill_sb = rdt_kill_sb,
};

864
60cf5e10
FY
865static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
866 umode_t mode)
867{
868 struct rdtgroup *parent, *rdtgrp;
869 struct kernfs_node *kn;
870 int ret, closid;
871
872 /* Only allow mkdir in the root directory */
873 if (parent_kn != rdtgroup_default.kn)
874 return -EPERM;
875
876 /* Do not accept '\n' to avoid unparsable situation. */
877 if (strchr(name, '\n'))
878 return -EINVAL;
879
880 parent = rdtgroup_kn_lock_live(parent_kn);
881 if (!parent) {
882 ret = -ENODEV;
883 goto out_unlock;
884 }
885
886 ret = closid_alloc();
887 if (ret < 0)
888 goto out_unlock;
889 closid = ret;
890
891 /* allocate the rdtgroup. */
892 rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
893 if (!rdtgrp) {
894 ret = -ENOSPC;
895 goto out_closid_free;
896 }
897 rdtgrp->closid = closid;
898 list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
899
900 /* kernfs creates the directory for rdtgrp */
901 kn = kernfs_create_dir(parent->kn, name, mode, rdtgrp);
902 if (IS_ERR(kn)) {
903 ret = PTR_ERR(kn);
904 goto out_cancel_ref;
905 }
906 rdtgrp->kn = kn;
907
908 /*
909 * kernfs_remove() will drop the reference count on "kn" which
910 * will free it. But we still need it to stick around for the
911 * rdtgroup_kn_unlock(kn} call below. Take one extra reference
912 * here, which will be dropped inside rdtgroup_kn_unlock().
913 */
914 kernfs_get(kn);
915
916 ret = rdtgroup_kn_set_ugid(kn);
917 if (ret)
918 goto out_destroy;
919
12e0110c
TL
920 ret = rdtgroup_add_files(kn, rdtgroup_base_files,
921 ARRAY_SIZE(rdtgroup_base_files));
922 if (ret)
923 goto out_destroy;
924
60cf5e10
FY
925 kernfs_activate(kn);
926
927 ret = 0;
928 goto out_unlock;
929
930out_destroy:
931 kernfs_remove(rdtgrp->kn);
932out_cancel_ref:
933 list_del(&rdtgrp->rdtgroup_list);
934 kfree(rdtgrp);
935out_closid_free:
936 closid_free(closid);
937out_unlock:
938 rdtgroup_kn_unlock(parent_kn);
939 return ret;
940}
941
static int rdtgroup_rmdir(struct kernfs_node *kn)
{
        struct task_struct *p, *t;
        struct rdtgroup *rdtgrp;
        int cpu, ret = 0;

        rdtgrp = rdtgroup_kn_lock_live(kn);
        if (!rdtgrp) {
                rdtgroup_kn_unlock(kn);
                return -EPERM;
        }

        /* Give any tasks back to the default group */
        read_lock(&tasklist_lock);
        for_each_process_thread(p, t) {
                if (t->closid == rdtgrp->closid)
                        t->closid = 0;
        }
        read_unlock(&tasklist_lock);

        /* Give any CPUs back to the default group */
        cpumask_or(&rdtgroup_default.cpu_mask,
                   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
        for_each_cpu(cpu, &rdtgrp->cpu_mask)
                per_cpu(cpu_closid, cpu) = 0;

        rdtgrp->flags = RDT_DELETED;
        closid_free(rdtgrp->closid);
        list_del(&rdtgrp->rdtgroup_list);

        /*
         * one extra hold on this, will drop when we kfree(rdtgrp)
         * in rdtgroup_kn_unlock()
         */
        kernfs_get(kn);
        kernfs_remove(rdtgrp->kn);

        rdtgroup_kn_unlock(kn);

        return ret;
}

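Since mkdir and rmdir on the mounted filesystem are routed to rdtgroup_mkdir() and rdtgroup_rmdir() above, resource groups are created and destroyed with the ordinary syscalls. A hypothetical user-space sketch follows; the mount point and group name are assumptions of the example.

/*
 * Hypothetical user-space example: create and remove a resource group.
 * mkdir() ends up in rdtgroup_mkdir(), rmdir() in rdtgroup_rmdir().
 */
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        if (mkdir("/sys/fs/resctrl/grp1", 0755)) {      /* allocates a CLOSID */
                perror("mkdir");
                return 1;
        }
        /* ... configure grp1/schemata, grp1/cpus, grp1/tasks here ... */
        if (rmdir("/sys/fs/resctrl/grp1")) {            /* frees the CLOSID */
                perror("rmdir");
                return 1;
        }
        return 0;
}
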
static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
        .mkdir  = rdtgroup_mkdir,
        .rmdir  = rdtgroup_rmdir,
};

static int __init rdtgroup_setup_root(void)
{
        int ret;

        rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
                                      KERNFS_ROOT_CREATE_DEACTIVATED,
                                      &rdtgroup_default);
        if (IS_ERR(rdt_root))
                return PTR_ERR(rdt_root);

        mutex_lock(&rdtgroup_mutex);

        rdtgroup_default.closid = 0;
        list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);

        ret = rdtgroup_add_files(rdt_root->kn, rdtgroup_base_files,
                                 ARRAY_SIZE(rdtgroup_base_files));
        if (ret) {
                kernfs_destroy_root(rdt_root);
                goto out;
        }

        rdtgroup_default.kn = rdt_root->kn;
        kernfs_activate(rdtgroup_default.kn);

out:
        mutex_unlock(&rdtgroup_mutex);

        return ret;
}

/*
 * rdtgroup_init - rdtgroup initialization
 *
 * Set up the resctrl file system: set up the root, create the mount point,
 * register the rdtgroup filesystem, and initialize the files under the
 * root directory.
 *
 * Return: 0 on success or -errno
 */
int __init rdtgroup_init(void)
{
        int ret = 0;

        ret = rdtgroup_setup_root();
        if (ret)
                return ret;

        ret = sysfs_create_mount_point(fs_kobj, "resctrl");
        if (ret)
                goto cleanup_root;

        ret = register_filesystem(&rdt_fs_type);
        if (ret)
                goto cleanup_mountpoint;

        return 0;

cleanup_mountpoint:
        sysfs_remove_mount_point(fs_kobj, "resctrl");
cleanup_root:
        kernfs_destroy_root(rdt_root);

        return ret;
}