/*
 * User interface for Resource Allocation in Resource Director Technology(RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * More information about RDT can be found in the Intel(R) x86 Architecture
 * Software Developer Manual.
 */

#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/task_work.h>

#include <uapi/linux/magic.h>

#include <asm/intel_rdt.h>
#include <asm/intel_rdt_common.h>

DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
struct kernfs_root *rdt_root;
struct rdtgroup rdtgroup_default;
LIST_HEAD(rdt_all_groups);

/* Kernel fs node for "info" directory under root */
static struct kernfs_node *kn_info;

/*
 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
 * we can keep a bitmap of free CLOSIDs in a single integer.
 *
 * Using a global CLOSID across all resources has some advantages and
 * some drawbacks:
 * + We can simply set "current->closid" to assign a task to a resource
 *   group.
 * + Context switch code can avoid extra memory references deciding which
 *   CLOSID to load into the PQR_ASSOC MSR
 * - We give up some options in configuring resource groups across multi-socket
 *   systems.
 * - Our choices on how to configure each resource become progressively more
 *   limited as the number of resources grows.
 */
static int closid_free_map;
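
/*
 * Initialize the free map when resctrl is mounted. Only the smallest
 * num_closid across all enabled resources is usable, because the same
 * CLOSID is applied to every resource.
 */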
static void closid_init(void)
{
        struct rdt_resource *r;
        int rdt_min_closid = 32;

        /* Compute rdt_min_closid across all resources */
        for_each_enabled_rdt_resource(r)
                rdt_min_closid = min(rdt_min_closid, r->num_closid);

        closid_free_map = BIT_MASK(rdt_min_closid) - 1;

        /* CLOSID 0 is always reserved for the default group */
        closid_free_map &= ~1;
}

int closid_alloc(void)
{
        int closid = ffs(closid_free_map);

        /* ffs() returns 1 + the index of the first set bit, 0 if none set */
        if (closid == 0)
                return -ENOSPC;
        closid--;
        closid_free_map &= ~(1 << closid);

        return closid;
}
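
/* Return @closid to the free map. */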
static void closid_free(int closid)
{
        closid_free_map |= 1 << closid;
}

/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
        struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
                                .ia_uid = current_fsuid(),
                                .ia_gid = current_fsgid(), };

        if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
            gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
                return 0;

        return kernfs_setattr(kn, &iattr);
}

static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
{
        struct kernfs_node *kn;
        int ret;

        kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
                                  0, rft->kf_ops, rft, NULL, NULL);
        if (IS_ERR(kn))
                return PTR_ERR(kn);

        ret = rdtgroup_kn_set_ugid(kn);
        if (ret) {
                kernfs_remove(kn);
                return ret;
        }

        return 0;
}

static int rdtgroup_add_files(struct kernfs_node *kn, struct rftype *rfts,
                              int len)
{
        struct rftype *rft;
        int ret;

        lockdep_assert_held(&rdtgroup_mutex);

        for (rft = rfts; rft < rfts + len; rft++) {
                ret = rdtgroup_add_file(kn, rft);
                if (ret)
                        goto error;
        }

        return 0;
error:
        pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
        while (--rft >= rfts)
                kernfs_remove_by_name(kn, rft->name);
        return ret;
}

static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
{
        struct kernfs_open_file *of = m->private;
        struct rftype *rft = of->kn->priv;

        if (rft->seq_show)
                return rft->seq_show(of, m, arg);
        return 0;
}

static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
                                   size_t nbytes, loff_t off)
{
        struct rftype *rft = of->kn->priv;

        if (rft->write)
                return rft->write(of, buf, nbytes, off);

        return -EINVAL;
}

static struct kernfs_ops rdtgroup_kf_single_ops = {
        .atomic_write_len       = PAGE_SIZE,
        .write                  = rdtgroup_file_write,
        .seq_show               = rdtgroup_seqfile_show,
};
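
/*
 * "cpus" file read handler: print the CPUs owned by this resource group
 * as a bitmask.
 */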
static int rdtgroup_cpus_show(struct kernfs_open_file *of,
                              struct seq_file *s, void *v)
{
        struct rdtgroup *rdtgrp;
        int ret = 0;

        rdtgrp = rdtgroup_kn_lock_live(of->kn);

        if (rdtgrp)
                seq_printf(s, "%*pb\n", cpumask_pr_args(&rdtgrp->cpu_mask));
        else
                ret = -ENOENT;
        rdtgroup_kn_unlock(of->kn);

        return ret;
}

/*
 * This is safe against intel_rdt_sched_in() called from __switch_to()
 * because __switch_to() is executed with interrupts disabled. A local call
 * from rdt_update_closid() is protected against __switch_to() because
 * preemption is disabled.
 */
static void rdt_update_cpu_closid(void *closid)
{
        if (closid)
                this_cpu_write(cpu_closid, *(int *)closid);
        /*
         * We cannot unconditionally write the MSR because the current
         * executing task might have its own closid selected. Just reuse
         * the context switch code.
         */
        intel_rdt_sched_in();
}

/*
 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask,
 *
 * Per task closids must have been set up before calling this function.
 *
 * The per cpu closids are updated with the smp function call, when @closid
 * is not NULL. If @closid is NULL then all affected percpu closids must
 * have been set up before calling this function.
 */
static void
rdt_update_closid(const struct cpumask *cpu_mask, int *closid)
{
        int cpu = get_cpu();

        if (cpumask_test_cpu(cpu, cpu_mask))
                rdt_update_cpu_closid(closid);
        smp_call_function_many(cpu_mask, rdt_update_cpu_closid, closid, 1);
        put_cpu();
}
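
/*
 * "cpus" file write handler. The new mask is applied in three steps: CPUs
 * dropped from the group are returned to the default group, CPUs added are
 * removed from whichever group currently owns them, and the per-cpu closid
 * of every CPU that moved is updated.
 */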
static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
                                   char *buf, size_t nbytes, loff_t off)
{
        cpumask_var_t tmpmask, newmask;
        struct rdtgroup *rdtgrp, *r;
        int ret;

        if (!buf)
                return -EINVAL;

        if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
                return -ENOMEM;
        if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
                free_cpumask_var(tmpmask);
                return -ENOMEM;
        }

        rdtgrp = rdtgroup_kn_lock_live(of->kn);
        if (!rdtgrp) {
                ret = -ENOENT;
                goto unlock;
        }

        ret = cpumask_parse(buf, newmask);
        if (ret)
                goto unlock;

        /* check that user didn't specify any offline cpus */
        cpumask_andnot(tmpmask, newmask, cpu_online_mask);
        if (cpumask_weight(tmpmask)) {
                ret = -EINVAL;
                goto unlock;
        }

        /* Check whether cpus are dropped from this group */
        cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
        if (cpumask_weight(tmpmask)) {
                /* Can't drop from default group */
                if (rdtgrp == &rdtgroup_default) {
                        ret = -EINVAL;
                        goto unlock;
                }
                /* Give any dropped cpus to rdtgroup_default */
                cpumask_or(&rdtgroup_default.cpu_mask,
                           &rdtgroup_default.cpu_mask, tmpmask);
                rdt_update_closid(tmpmask, &rdtgroup_default.closid);
        }

        /*
         * If we added cpus, remove them from previous group that owned them
         * and update per-cpu closid
         */
        cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
        if (cpumask_weight(tmpmask)) {
                list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
                        if (r == rdtgrp)
                                continue;
                        cpumask_andnot(&r->cpu_mask, &r->cpu_mask, tmpmask);
                }
                rdt_update_closid(tmpmask, &rdtgrp->closid);
        }

        /* Done pushing/pulling - update this group with new mask */
        cpumask_copy(&rdtgrp->cpu_mask, newmask);

unlock:
        rdtgroup_kn_unlock(of->kn);
        free_cpumask_var(tmpmask);
        free_cpumask_var(newmask);

        return ret ?: nbytes;
}

struct task_move_callback {
        struct callback_head    work;
        struct rdtgroup         *rdtgrp;
};
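
/*
 * Runs via task_work in the context of the moved task, so the PQR_ASSOC
 * MSR is updated on the CPU the task actually runs on.
 */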
static void move_myself(struct callback_head *head)
{
        struct task_move_callback *callback;
        struct rdtgroup *rdtgrp;

        callback = container_of(head, struct task_move_callback, work);
        rdtgrp = callback->rdtgrp;

        /*
         * If resource group was deleted before this task work callback
         * was invoked, then assign the task to root group and free the
         * resource group.
         */
        if (atomic_dec_and_test(&rdtgrp->waitcount) &&
            (rdtgrp->flags & RDT_DELETED)) {
                current->closid = 0;
                kfree(rdtgrp);
        }

        preempt_disable();
        /* update PQR_ASSOC MSR to make resource group go into effect */
        intel_rdt_sched_in();
        preempt_enable();

        kfree(callback);
}

static int __rdtgroup_move_task(struct task_struct *tsk,
                                struct rdtgroup *rdtgrp)
{
        struct task_move_callback *callback;
        int ret;

        callback = kzalloc(sizeof(*callback), GFP_KERNEL);
        if (!callback)
                return -ENOMEM;
        callback->work.func = move_myself;
        callback->rdtgrp = rdtgrp;

        /*
         * Take a refcount, so rdtgrp cannot be freed before the
         * callback has been invoked.
         */
        atomic_inc(&rdtgrp->waitcount);
        ret = task_work_add(tsk, &callback->work, true);
        if (ret) {
                /*
                 * Task is exiting. Drop the refcount and free the callback.
                 * No need to check the refcount as the group cannot be
                 * deleted before the write function unlocks rdtgroup_mutex.
                 */
                atomic_dec(&rdtgrp->waitcount);
                kfree(callback);
        } else {
                tsk->closid = rdtgrp->closid;
        }
        return ret;
}

static int rdtgroup_task_write_permission(struct task_struct *task,
                                          struct kernfs_open_file *of)
{
        const struct cred *tcred = get_task_cred(task);
        const struct cred *cred = current_cred();
        int ret = 0;

        /*
         * Even if we're attaching all tasks in the thread group, we only
         * need to check permissions on one of them.
         */
        if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
            !uid_eq(cred->euid, tcred->uid) &&
            !uid_eq(cred->euid, tcred->suid))
                ret = -EPERM;

        put_cred(tcred);
        return ret;
}

static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
                              struct kernfs_open_file *of)
{
        struct task_struct *tsk;
        int ret;

        rcu_read_lock();
        if (pid) {
                tsk = find_task_by_vpid(pid);
                if (!tsk) {
                        rcu_read_unlock();
                        return -ESRCH;
                }
        } else {
                tsk = current;
        }

        get_task_struct(tsk);
        rcu_read_unlock();

        ret = rdtgroup_task_write_permission(tsk, of);
        if (!ret)
                ret = __rdtgroup_move_task(tsk, rdtgrp);

        put_task_struct(tsk);
        return ret;
}
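
/*
 * "tasks" file write handler: move the task with the given pid (or the
 * caller, if the pid is 0) into this resource group.
 */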
static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
                                    char *buf, size_t nbytes, loff_t off)
{
        struct rdtgroup *rdtgrp;
        int ret = 0;
        pid_t pid;

        if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
                return -EINVAL;
        rdtgrp = rdtgroup_kn_lock_live(of->kn);

        if (rdtgrp)
                ret = rdtgroup_move_task(pid, rdtgrp, of);
        else
                ret = -ENOENT;

        rdtgroup_kn_unlock(of->kn);

        return ret ?: nbytes;
}

static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
{
        struct task_struct *p, *t;

        rcu_read_lock();
        for_each_process_thread(p, t) {
                if (t->closid == r->closid)
                        seq_printf(s, "%d\n", t->pid);
        }
        rcu_read_unlock();
}

static int rdtgroup_tasks_show(struct kernfs_open_file *of,
                               struct seq_file *s, void *v)
{
        struct rdtgroup *rdtgrp;
        int ret = 0;

        rdtgrp = rdtgroup_kn_lock_live(of->kn);
        if (rdtgrp)
                show_rdt_tasks(rdtgrp, s);
        else
                ret = -ENOENT;
        rdtgroup_kn_unlock(of->kn);

        return ret;
}

/* Files in each rdtgroup */
static struct rftype rdtgroup_base_files[] = {
        {
                .name           = "cpus",
                .mode           = 0644,
                .kf_ops         = &rdtgroup_kf_single_ops,
                .write          = rdtgroup_cpus_write,
                .seq_show       = rdtgroup_cpus_show,
        },
        {
                .name           = "tasks",
                .mode           = 0644,
                .kf_ops         = &rdtgroup_kf_single_ops,
                .write          = rdtgroup_tasks_write,
                .seq_show       = rdtgroup_tasks_show,
        },
        {
                .name           = "schemata",
                .mode           = 0644,
                .kf_ops         = &rdtgroup_kf_single_ops,
                .write          = rdtgroup_schemata_write,
                .seq_show       = rdtgroup_schemata_show,
        },
};
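
/*
 * The "info" file show handlers below find their rdt_resource through the
 * parent kernfs node: each per-resource info directory stores the resource
 * pointer in its ->priv.
 */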
static int rdt_num_closids_show(struct kernfs_open_file *of,
                                struct seq_file *seq, void *v)
{
        struct rdt_resource *r = of->kn->parent->priv;

        seq_printf(seq, "%d\n", r->num_closid);

        return 0;
}

static int rdt_cbm_mask_show(struct kernfs_open_file *of,
                             struct seq_file *seq, void *v)
{
        struct rdt_resource *r = of->kn->parent->priv;

        seq_printf(seq, "%x\n", r->max_cbm);

        return 0;
}

static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
                                 struct seq_file *seq, void *v)
{
        struct rdt_resource *r = of->kn->parent->priv;

        seq_printf(seq, "%d\n", r->min_cbm_bits);

        return 0;
}

/* rdtgroup information files for one cache resource. */
static struct rftype res_info_files[] = {
        {
                .name           = "num_closids",
                .mode           = 0444,
                .kf_ops         = &rdtgroup_kf_single_ops,
                .seq_show       = rdt_num_closids_show,
        },
        {
                .name           = "cbm_mask",
                .mode           = 0444,
                .kf_ops         = &rdtgroup_kf_single_ops,
                .seq_show       = rdt_cbm_mask_show,
        },
        {
                .name           = "min_cbm_bits",
                .mode           = 0444,
                .kf_ops         = &rdtgroup_kf_single_ops,
                .seq_show       = rdt_min_cbm_bits_show,
        },
};

static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
{
        struct kernfs_node *kn_subdir;
        struct rdt_resource *r;
        int ret;

        /* create the directory */
        kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
        if (IS_ERR(kn_info))
                return PTR_ERR(kn_info);
        kernfs_get(kn_info);

        for_each_enabled_rdt_resource(r) {
                kn_subdir = kernfs_create_dir(kn_info, r->name,
                                              kn_info->mode, r);
                if (IS_ERR(kn_subdir)) {
                        ret = PTR_ERR(kn_subdir);
                        goto out_destroy;
                }
                kernfs_get(kn_subdir);
                ret = rdtgroup_kn_set_ugid(kn_subdir);
                if (ret)
                        goto out_destroy;
                ret = rdtgroup_add_files(kn_subdir, res_info_files,
                                         ARRAY_SIZE(res_info_files));
                if (ret)
                        goto out_destroy;
                kernfs_activate(kn_subdir);
        }

        /*
         * This extra ref will be put in kernfs_remove() and guarantees
         * that @rdtgrp->kn is always accessible.
         */
        kernfs_get(kn_info);

        ret = rdtgroup_kn_set_ugid(kn_info);
        if (ret)
                goto out_destroy;

        kernfs_activate(kn_info);

        return 0;

out_destroy:
        kernfs_remove(kn_info);
        return ret;
}

static void l3_qos_cfg_update(void *arg)
{
        bool *enable = arg;

        wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}
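
/*
 * Enable or disable CDP for an L3 resource. IA32_L3_QOS_CFG applies per
 * L3 cache domain, so updating it on one CPU of each domain is sufficient.
 */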
static int set_l3_qos_cfg(struct rdt_resource *r, bool enable)
{
        cpumask_var_t cpu_mask;
        struct rdt_domain *d;
        int cpu;

        if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
                return -ENOMEM;

        list_for_each_entry(d, &r->domains, list) {
                /* Pick one CPU from each domain instance to update MSR */
                cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
        }
        cpu = get_cpu();
        /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
        if (cpumask_test_cpu(cpu, cpu_mask))
                l3_qos_cfg_update(&enable);
        /* Update QOS_CFG MSR on all other cpus in cpu_mask. */
        smp_call_function_many(cpu_mask, l3_qos_cfg_update, &enable, 1);
        put_cpu();

        free_cpumask_var(cpu_mask);

        return 0;
}

static int cdp_enable(void)
{
        struct rdt_resource *r_l3data = &rdt_resources_all[RDT_RESOURCE_L3DATA];
        struct rdt_resource *r_l3code = &rdt_resources_all[RDT_RESOURCE_L3CODE];
        struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
        int ret;

        if (!r_l3->capable || !r_l3data->capable || !r_l3code->capable)
                return -EINVAL;

        ret = set_l3_qos_cfg(r_l3, true);
        if (!ret) {
                r_l3->enabled = false;
                r_l3data->enabled = true;
                r_l3code->enabled = true;
        }
        return ret;
}

static void cdp_disable(void)
{
        struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];

        r->enabled = r->capable;

        if (rdt_resources_all[RDT_RESOURCE_L3DATA].enabled) {
                rdt_resources_all[RDT_RESOURCE_L3DATA].enabled = false;
                rdt_resources_all[RDT_RESOURCE_L3CODE].enabled = false;
                set_l3_qos_cfg(r, false);
        }
}
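
/* Parse "-o" mount options; "cdp" is currently the only one recognized. */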
static int parse_rdtgroupfs_options(char *data)
{
        char *token, *o = data;
        int ret = 0;

        while ((token = strsep(&o, ",")) != NULL) {
                if (!*token)
                        return -EINVAL;

                if (!strcmp(token, "cdp"))
                        ret = cdp_enable();
        }

        return ret;
}

/*
 * We don't allow rdtgroup directories to be created anywhere
 * except the root directory. Thus when looking for the rdtgroup
 * structure for a kernfs node we are either looking at a directory,
 * in which case the rdtgroup structure is pointed at by the "priv"
 * field, otherwise we have a file, and need only look to the parent
 * to find the rdtgroup.
 */
static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
{
        if (kernfs_type(kn) == KERNFS_DIR) {
                /*
                 * All the resource directories use "kn->priv"
                 * to point to the "struct rdtgroup" for the
                 * resource. "info" and its subdirectories don't
                 * have rdtgroup structures, so return NULL here.
                 */
                if (kn == kn_info || kn->parent == kn_info)
                        return NULL;
                else
                        return kn->priv;
        } else {
                return kn->parent->priv;
        }
}

struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
{
        struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

        if (!rdtgrp)
                return NULL;

        atomic_inc(&rdtgrp->waitcount);
        kernfs_break_active_protection(kn);

        mutex_lock(&rdtgroup_mutex);

        /* Was this group deleted while we waited? */
        if (rdtgrp->flags & RDT_DELETED)
                return NULL;

        return rdtgrp;
}

void rdtgroup_kn_unlock(struct kernfs_node *kn)
{
        struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

        if (!rdtgrp)
                return;

        mutex_unlock(&rdtgroup_mutex);

        if (atomic_dec_and_test(&rdtgrp->waitcount) &&
            (rdtgrp->flags & RDT_DELETED)) {
                kernfs_unbreak_active_protection(kn);
                kernfs_put(rdtgrp->kn);
                kfree(rdtgrp);
        } else {
                kernfs_unbreak_active_protection(kn);
        }
}
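
/*
 * Mount callback for the resctrl filesystem, typically reached via:
 *
 *   # mount -t resctrl resctrl [-o cdp] /sys/fs/resctrl
 */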
static struct dentry *rdt_mount(struct file_system_type *fs_type,
                                int flags, const char *unused_dev_name,
                                void *data)
{
        struct dentry *dentry;
        int ret;

        mutex_lock(&rdtgroup_mutex);
        /*
         * resctrl file system can only be mounted once.
         */
        if (static_branch_unlikely(&rdt_enable_key)) {
                dentry = ERR_PTR(-EBUSY);
                goto out;
        }

        ret = parse_rdtgroupfs_options(data);
        if (ret) {
                dentry = ERR_PTR(ret);
                goto out_cdp;
        }

        closid_init();

        ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
        if (ret) {
                dentry = ERR_PTR(ret);
                goto out_cdp;
        }

        dentry = kernfs_mount(fs_type, flags, rdt_root,
                              RDTGROUP_SUPER_MAGIC, NULL);
        if (IS_ERR(dentry))
                goto out_cdp;

        static_branch_enable(&rdt_enable_key);
        goto out;

out_cdp:
        cdp_disable();
out:
        mutex_unlock(&rdtgroup_mutex);

        return dentry;
}

static int reset_all_cbms(struct rdt_resource *r)
{
        struct msr_param msr_param;
        cpumask_var_t cpu_mask;
        struct rdt_domain *d;
        int i, cpu;

        if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
                return -ENOMEM;

        msr_param.res = r;
        msr_param.low = 0;
        msr_param.high = r->num_closid;

        /*
         * Disable resource control for this resource by setting all
         * CBMs in all domains to the maximum mask value. Pick one CPU
         * from each domain to update the MSRs below.
         */
        list_for_each_entry(d, &r->domains, list) {
                cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);

                for (i = 0; i < r->num_closid; i++)
                        d->cbm[i] = r->max_cbm;
        }
        cpu = get_cpu();
        /* Update CBM on this cpu if it's in cpu_mask. */
        if (cpumask_test_cpu(cpu, cpu_mask))
                rdt_cbm_update(&msr_param);
        /* Update CBM on all other cpus in cpu_mask. */
        smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
        put_cpu();

        free_cpumask_var(cpu_mask);

        return 0;
}

/*
 * Move tasks from one to the other group. If @from is NULL, then all tasks
 * in the system are moved unconditionally (used for teardown).
 *
 * If @mask is not NULL the cpus on which moved tasks are running are set
 * in that mask so the update smp function call is restricted to affected
 * cpus.
 */
static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
                                 struct cpumask *mask)
{
        struct task_struct *p, *t;

        read_lock(&tasklist_lock);
        for_each_process_thread(p, t) {
                if (!from || t->closid == from->closid) {
                        t->closid = to->closid;

                        /*
                         * This is safe on x86 w/o barriers as the ordering
                         * of writing to task_cpu() and t->on_cpu is
                         * reverse to the reading here. The detection is
                         * inaccurate as tasks might move or schedule
                         * before the smp function call takes place. In
                         * such a case the function call is pointless, but
                         * there is no other side effect.
                         */
                        if (mask && t->on_cpu)
                                cpumask_set_cpu(task_cpu(t), mask);
                }
        }
        read_unlock(&tasklist_lock);
}

/*
 * Forcibly remove all subdirectories under root.
 */
static void rmdir_all_sub(void)
{
        struct rdtgroup *rdtgrp, *tmp;

        /* Move all tasks to the default resource group */
        rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);

        list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
                /* Remove each rdtgroup other than root */
                if (rdtgrp == &rdtgroup_default)
                        continue;

                /*
                 * Give any CPUs back to the default group. We cannot copy
                 * cpu_online_mask because a CPU might have executed the
                 * offline callback already, but is still marked online.
                 */
                cpumask_or(&rdtgroup_default.cpu_mask,
                           &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

                kernfs_remove(rdtgrp->kn);
                list_del(&rdtgrp->rdtgroup_list);
                kfree(rdtgrp);
        }
        /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
        get_cpu();
        rdt_update_closid(cpu_online_mask, &rdtgroup_default.closid);
        put_cpu();

        kernfs_remove(kn_info);
}
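
/*
 * Unmount: restore every CBM to its maximum value, disable CDP and remove
 * all subdirectories, returning the system to its default configuration.
 */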
static void rdt_kill_sb(struct super_block *sb)
{
        struct rdt_resource *r;

        mutex_lock(&rdtgroup_mutex);

        /* Put everything back to default values. */
        for_each_enabled_rdt_resource(r)
                reset_all_cbms(r);
        cdp_disable();
        rmdir_all_sub();
        static_branch_disable(&rdt_enable_key);
        kernfs_kill_sb(sb);
        mutex_unlock(&rdtgroup_mutex);
}

static struct file_system_type rdt_fs_type = {
        .name    = "resctrl",
        .mount   = rdt_mount,
        .kill_sb = rdt_kill_sb,
};
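
/*
 * mkdir handler for /sys/fs/resctrl: allocate a CLOSID and create a new
 * resource group populated with the base files "cpus", "tasks" and
 * "schemata".
 */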
static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
                          umode_t mode)
{
        struct rdtgroup *parent, *rdtgrp;
        struct kernfs_node *kn;
        int ret, closid;

        /* Only allow mkdir in the root directory */
        if (parent_kn != rdtgroup_default.kn)
                return -EPERM;

        /* Do not accept '\n' to avoid unparsable situation. */
        if (strchr(name, '\n'))
                return -EINVAL;

        parent = rdtgroup_kn_lock_live(parent_kn);
        if (!parent) {
                ret = -ENODEV;
                goto out_unlock;
        }

        ret = closid_alloc();
        if (ret < 0)
                goto out_unlock;
        closid = ret;

        /* allocate the rdtgroup. */
        rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
        if (!rdtgrp) {
                ret = -ENOSPC;
                goto out_closid_free;
        }
        rdtgrp->closid = closid;
        list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);

        /* kernfs creates the directory for rdtgrp */
        kn = kernfs_create_dir(parent->kn, name, mode, rdtgrp);
        if (IS_ERR(kn)) {
                ret = PTR_ERR(kn);
                goto out_cancel_ref;
        }
        rdtgrp->kn = kn;

        /*
         * kernfs_remove() will drop the reference count on "kn" which
         * will free it. But we still need it to stick around for the
         * rdtgroup_kn_unlock(kn) call below. Take one extra reference
         * here, which will be dropped inside rdtgroup_kn_unlock().
         */
        kernfs_get(kn);

        ret = rdtgroup_kn_set_ugid(kn);
        if (ret)
                goto out_destroy;

        ret = rdtgroup_add_files(kn, rdtgroup_base_files,
                                 ARRAY_SIZE(rdtgroup_base_files));
        if (ret)
                goto out_destroy;

        kernfs_activate(kn);

        ret = 0;
        goto out_unlock;

out_destroy:
        kernfs_remove(rdtgrp->kn);
out_cancel_ref:
        list_del(&rdtgrp->rdtgroup_list);
        kfree(rdtgrp);
out_closid_free:
        closid_free(closid);
out_unlock:
        rdtgroup_kn_unlock(parent_kn);
        return ret;
}

static int rdtgroup_rmdir(struct kernfs_node *kn)
{
        int ret, cpu, closid = rdtgroup_default.closid;
        struct rdtgroup *rdtgrp;
        cpumask_var_t tmpmask;

        if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
                return -ENOMEM;

        rdtgrp = rdtgroup_kn_lock_live(kn);
        if (!rdtgrp) {
                ret = -EPERM;
                goto out;
        }

        /* Give any tasks back to the default group */
        rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);

        /* Give any CPUs back to the default group */
        cpumask_or(&rdtgroup_default.cpu_mask,
                   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

        /* Update per cpu closid of the moved CPUs first */
        for_each_cpu(cpu, &rdtgrp->cpu_mask)
                per_cpu(cpu_closid, cpu) = closid;
        /*
         * Update the MSR on moved CPUs and CPUs which have moved
         * task running on them.
         */
        cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
        rdt_update_closid(tmpmask, NULL);

        rdtgrp->flags = RDT_DELETED;
        closid_free(rdtgrp->closid);
        list_del(&rdtgrp->rdtgroup_list);

        /*
         * one extra hold on this, will drop when we kfree(rdtgrp)
         * in rdtgroup_kn_unlock()
         */
        kernfs_get(kn);
        kernfs_remove(rdtgrp->kn);
        ret = 0;
out:
        rdtgroup_kn_unlock(kn);
        free_cpumask_var(tmpmask);
        return ret;
}
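
/* Show "cdp" in /proc/mounts when code/data prioritization is enabled. */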
static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
{
        if (rdt_resources_all[RDT_RESOURCE_L3DATA].enabled)
                seq_puts(seq, ",cdp");
        return 0;
}

static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
        .mkdir          = rdtgroup_mkdir,
        .rmdir          = rdtgroup_rmdir,
        .show_options   = rdtgroup_show_options,
};

static int __init rdtgroup_setup_root(void)
{
        int ret;

        rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
                                      KERNFS_ROOT_CREATE_DEACTIVATED,
                                      &rdtgroup_default);
        if (IS_ERR(rdt_root))
                return PTR_ERR(rdt_root);

        mutex_lock(&rdtgroup_mutex);

        rdtgroup_default.closid = 0;
        list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);

        ret = rdtgroup_add_files(rdt_root->kn, rdtgroup_base_files,
                                 ARRAY_SIZE(rdtgroup_base_files));
        if (ret) {
                kernfs_destroy_root(rdt_root);
                goto out;
        }

        rdtgroup_default.kn = rdt_root->kn;
        kernfs_activate(rdtgroup_default.kn);

out:
        mutex_unlock(&rdtgroup_mutex);

        return ret;
}

/*
 * rdtgroup_init - rdtgroup initialization
 *
 * Set up the resctrl file system: set up the root, create the mount point,
 * register the rdtgroup filesystem, and initialize the files under the
 * root directory.
 *
 * Return: 0 on success or -errno
 */
int __init rdtgroup_init(void)
{
        int ret = 0;

        ret = rdtgroup_setup_root();
        if (ret)
                return ret;

        ret = sysfs_create_mount_point(fs_kobj, "resctrl");
        if (ret)
                goto cleanup_root;

        ret = register_filesystem(&rdt_fs_type);
        if (ret)
                goto cleanup_mountpoint;

        return 0;

cleanup_mountpoint:
        sysfs_remove_mount_point(fs_kobj, "resctrl");
cleanup_root:
        kernfs_destroy_root(rdt_root);

        return ret;
}