// SPDX-License-Identifier: GPL-2.0-only
/*
 * User interface for Resource Allocation in Resource Director Technology(RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/fs_parser.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/task_work.h>
#include <linux/user_namespace.h>

#include <uapi/linux/magic.h>

#include <asm/resctrl_sched.h>
#include "internal.h"
DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
static struct kernfs_root *rdt_root;
struct rdtgroup rdtgroup_default;
LIST_HEAD(rdt_all_groups);
/* Kernel fs node for "info" directory under root */
static struct kernfs_node *kn_info;

/* Kernel fs node for "mon_groups" directory under root */
static struct kernfs_node *kn_mongrp;

/* Kernel fs node for "mon_data" directory under root */
static struct kernfs_node *kn_mondata;

static struct seq_buf last_cmd_status;
static char last_cmd_status_buf[512];

struct dentry *debugfs_resctrl;
void rdt_last_cmd_clear(void)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_clear(&last_cmd_status);
}

void rdt_last_cmd_puts(const char *s)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_puts(&last_cmd_status, s);
}

void rdt_last_cmd_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_vprintf(&last_cmd_status, fmt, ap);
	va_end(ap);
}
/*
 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
 * we can keep a bitmap of free CLOSIDs in a single integer.
 *
 * Using a global CLOSID across all resources has some advantages and
 * some disadvantages:
 * + We can simply set "current->closid" to assign a task to a resource
 *   group.
 * + Context switch code can avoid extra memory references deciding which
 *   CLOSID to load into the PQR_ASSOC MSR
 * - We give up some options in configuring resource groups across multi-socket
 *   systems.
 * - Our choices on how to configure each resource become progressively more
 *   limited as the number of resources grows.
 */
static int closid_free_map;
static int closid_free_map_len;
int closids_supported(void)
{
	return closid_free_map_len;
}
static void closid_init(void)
{
	struct rdt_resource *r;
	int rdt_min_closid = 32;

	/* Compute rdt_min_closid across all resources */
	for_each_alloc_enabled_rdt_resource(r)
		rdt_min_closid = min(rdt_min_closid, r->num_closid);

	closid_free_map = BIT_MASK(rdt_min_closid) - 1;

	/* CLOSID 0 is always reserved for the default group */
	closid_free_map &= ~1;
	closid_free_map_len = rdt_min_closid;
}
static int closid_alloc(void)
{
	u32 closid = ffs(closid_free_map);

	if (closid == 0)
		return -ENOSPC;
	closid--;
	closid_free_map &= ~(1 << closid);

	return closid;
}
void closid_free(int closid)
{
	closid_free_map |= 1 << closid;
}
/**
 * closid_allocated - test if provided closid is in use
 * @closid: closid to be tested
 *
 * Return: true if @closid is currently associated with a resource group,
 * false if @closid is free
 */
static bool closid_allocated(unsigned int closid)
{
	return (closid_free_map & (1 << closid)) == 0;
}
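
/*
 * Illustrative walk-through of the allocator above (editorial example,
 * not from the original source; assumes hardware supporting 4 CLOSIDs):
 *
 *	closid_init();           closid_free_map == 0b1110 (bit 0 reserved)
 *	closid_alloc();          returns 1, map becomes 0b1100
 *	closid_allocated(1);     true - bit 1 is clear in the free map
 *	closid_free(1);          map restored to 0b1110
 */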
/**
 * rdtgroup_mode_by_closid - Return mode of resource group with closid
 * @closid: closid of the resource group
 *
 * Each resource group is associated with a @closid. Here the mode
 * of a resource group can be queried by searching for it using its closid.
 *
 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
 */
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
{
	struct rdtgroup *rdtgrp;

	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (rdtgrp->closid == closid)
			return rdtgrp->mode;
	}

	return RDT_NUM_MODES;
}
static const char * const rdt_mode_str[] = {
	[RDT_MODE_SHAREABLE]		= "shareable",
	[RDT_MODE_EXCLUSIVE]		= "exclusive",
	[RDT_MODE_PSEUDO_LOCKSETUP]	= "pseudo-locksetup",
	[RDT_MODE_PSEUDO_LOCKED]	= "pseudo-locked",
};
/**
 * rdtgroup_mode_str - Return the string representation of mode
 * @mode: the resource group mode as &enum rdtgroup_mode
 *
 * Return: string representation of valid mode, "unknown" otherwise
 */
static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
{
	if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
		return "unknown";

	return rdt_mode_str[mode];
}
/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
				.ia_uid = current_fsuid(),
				.ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}
static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
{
	struct kernfs_node *kn;
	int ret;

	kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				  0, rft->kf_ops, rft, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return 0;
}
static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	struct rftype *rft = of->kn->priv;

	if (rft->seq_show)
		return rft->seq_show(of, m, arg);
	return 0;
}
static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct rftype *rft = of->kn->priv;

	if (rft->write)
		return rft->write(of, buf, nbytes, off);

	return -EINVAL;
}
static struct kernfs_ops rdtgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= rdtgroup_file_write,
	.seq_show		= rdtgroup_seqfile_show,
};

static struct kernfs_ops kf_mondata_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.seq_show		= rdtgroup_mondata_show,
};
static bool is_cpu_list(struct kernfs_open_file *of)
{
	struct rftype *rft = of->kn->priv;

	return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
}
static int rdtgroup_cpus_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct cpumask *mask;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				mask = &rdtgrp->plr->d->cpu_mask;
				seq_printf(s, is_cpu_list(of) ?
					   "%*pbl\n" : "%*pb\n",
					   cpumask_pr_args(mask));
			}
		} else {
			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
				   cpumask_pr_args(&rdtgrp->cpu_mask));
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);

	return ret;
}
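
/*
 * Editorial example of the two output formats above (hypothetical group
 * owning CPUs 0-3; not from the original source): the "cpus" file uses the
 * bitmask form ("%*pb") and prints "f", while "cpus_list" uses the range
 * form ("%*pbl") and prints "0-3" for the same cpumask.
 */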
/*
 * This is safe against resctrl_sched_in() called from __switch_to()
 * because __switch_to() is executed with interrupts disabled. A local call
 * from update_closid_rmid() is protected against __switch_to() because
 * preemption is disabled.
 */
static void update_cpu_closid_rmid(void *info)
{
	struct rdtgroup *r = info;

	if (r) {
		this_cpu_write(pqr_state.default_closid, r->closid);
		this_cpu_write(pqr_state.default_rmid, r->mon.rmid);
	}

	/*
	 * We cannot unconditionally write the MSR because the current
	 * executing task might have its own closid selected. Just reuse
	 * the context switch code.
	 */
	resctrl_sched_in();
}
/*
 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask,
 *
 * Per task closids/rmids must have been set up before calling this function.
 */
static void
update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{
	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, cpu_mask))
		update_cpu_closid_rmid(r);
	smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1);
	put_cpu();
}
static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			  cpumask_var_t tmpmask)
{
	struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
	struct list_head *head;

	/* Check whether cpus belong to parent ctrl group */
	cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
		return -EINVAL;
	}

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (cpumask_weight(tmpmask)) {
		/* Give any dropped cpus to parent rdtgroup */
		cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, prgrp);
	}

	/*
	 * If we added cpus, remove them from previous group that owned them
	 * and update per-cpu rmid
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			if (crgrp == rdtgrp)
				continue;
			cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
				       tmpmask);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	return 0;
}
static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
{
	struct rdtgroup *crgrp;

	cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
	/* update the child mon group masks as well */
	list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
		cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
}
static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			   cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
{
	struct rdtgroup *r, *crgrp;
	struct list_head *head;

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (cpumask_weight(tmpmask)) {
		/* Can't drop from default group */
		if (rdtgrp == &rdtgroup_default) {
			rdt_last_cmd_puts("Can't drop CPUs from default group\n");
			return -EINVAL;
		}

		/* Give any dropped cpus to rdtgroup_default */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, &rdtgroup_default);
	}

	/*
	 * If we added cpus, remove them from previous group and
	 * the prev group's child groups that owned them
	 * and update per-cpu closid/rmid.
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
			if (r == rdtgrp)
				continue;
			cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
			if (cpumask_weight(tmpmask1))
				cpumask_rdtgrp_clear(r, tmpmask1);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	/*
	 * Clear child mon group masks since there is a new parent mask
	 * now and update the rmid for the cpus the child lost.
	 */
	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
		cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
		update_closid_rmid(tmpmask, rdtgrp);
		cpumask_clear(&crgrp->cpu_mask);
	}

	return 0;
}
static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	cpumask_var_t tmpmask, newmask, tmpmask1;
	struct rdtgroup *rdtgrp;
	int ret;

	if (!buf)
		return -EINVAL;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		return -ENOMEM;
	}
	if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		free_cpumask_var(newmask);
		return -ENOMEM;
	}

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	rdt_last_cmd_clear();
	if (!rdtgrp) {
		ret = -ENOENT;
		rdt_last_cmd_puts("Directory was removed\n");
		goto unlock;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	if (is_cpu_list(of))
		ret = cpulist_parse(buf, newmask);
	else
		ret = cpumask_parse(buf, newmask);
	if (ret) {
		rdt_last_cmd_puts("Bad CPU list/mask\n");
		goto unlock;
	}

	/* check that user didn't specify any offline cpus */
	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
	if (cpumask_weight(tmpmask)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Can only assign online CPUs\n");
		goto unlock;
	}

	if (rdtgrp->type == RDTCTRL_GROUP)
		ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
	else if (rdtgrp->type == RDTMON_GROUP)
		ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
	else
		ret = -EINVAL;

unlock:
	rdtgroup_kn_unlock(of->kn);
	free_cpumask_var(tmpmask);
	free_cpumask_var(newmask);
	free_cpumask_var(tmpmask1);

	return ret ?: nbytes;
}
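
/*
 * Illustrative userspace view of rdtgroup_cpus_write() (editorial example,
 * hypothetical group name "p1"; assumes resctrl is mounted):
 *
 *	# echo 2-3 > /sys/fs/resctrl/p1/cpus_list
 *
 * CPUs 2-3 are pulled from whichever group previously owned them, and the
 * per-cpu default closid/rmid are refreshed via update_closid_rmid().
 */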
struct task_move_callback {
	struct callback_head	work;
	struct rdtgroup		*rdtgrp;
};

static void move_myself(struct callback_head *head)
{
	struct task_move_callback *callback;
	struct rdtgroup *rdtgrp;

	callback = container_of(head, struct task_move_callback, work);
	rdtgrp = callback->rdtgrp;

	/*
	 * If resource group was deleted before this task work callback
	 * was invoked, then assign the task to root group and free the
	 * resource group.
	 */
	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		current->closid = 0;
		current->rmid = 0;
		kfree(rdtgrp);
	}

	preempt_disable();
	/* update PQR_ASSOC MSR to make resource group go into effect */
	resctrl_sched_in();
	preempt_enable();

	kfree(callback);
}
static int __rdtgroup_move_task(struct task_struct *tsk,
				struct rdtgroup *rdtgrp)
{
	struct task_move_callback *callback;
	int ret;

	callback = kzalloc(sizeof(*callback), GFP_KERNEL);
	if (!callback)
		return -ENOMEM;
	callback->work.func = move_myself;
	callback->rdtgrp = rdtgrp;

	/*
	 * Take a refcount, so rdtgrp cannot be freed before the
	 * callback has been invoked.
	 */
	atomic_inc(&rdtgrp->waitcount);
	ret = task_work_add(tsk, &callback->work, true);
	if (ret) {
		/*
		 * Task is exiting. Drop the refcount and free the callback.
		 * No need to check the refcount as the group cannot be
		 * deleted before the write function unlocks rdtgroup_mutex.
		 */
		atomic_dec(&rdtgrp->waitcount);
		kfree(callback);
		rdt_last_cmd_puts("Task exited\n");
	} else {
		/*
		 * For ctrl_mon groups move both closid and rmid.
		 * For monitor groups, can move the tasks only from
		 * their parent CTRL group.
		 */
		if (rdtgrp->type == RDTCTRL_GROUP) {
			tsk->closid = rdtgrp->closid;
			tsk->rmid = rdtgrp->mon.rmid;
		} else if (rdtgrp->type == RDTMON_GROUP) {
			if (rdtgrp->mon.parent->closid == tsk->closid) {
				tsk->rmid = rdtgrp->mon.rmid;
			} else {
				rdt_last_cmd_puts("Can't move task to different control group\n");
				ret = -EINVAL;
			}
		}
	}
	return ret;
}
/**
 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
 * @r: Resource group
 *
 * Return: 1 if tasks have been assigned to @r, 0 otherwise
 */
int rdtgroup_tasks_assigned(struct rdtgroup *r)
{
	struct task_struct *p, *t;
	int ret = 0;

	lockdep_assert_held(&rdtgroup_mutex);

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
		    (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}
static int rdtgroup_task_write_permission(struct task_struct *task,
					  struct kernfs_open_file *of)
{
	const struct cred *tcred = get_task_cred(task);
	const struct cred *cred = current_cred();
	int ret = 0;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid)) {
		rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
		ret = -EPERM;
	}

	put_cred(tcred);
	return ret;
}
static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
			      struct kernfs_open_file *of)
{
	struct task_struct *tsk;
	int ret;

	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			rdt_last_cmd_printf("No task %d\n", pid);
			return -ESRCH;
		}
	} else {
		tsk = current;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = rdtgroup_task_write_permission(tsk, of);
	if (!ret)
		ret = __rdtgroup_move_task(tsk, rdtgrp);

	put_task_struct(tsk);
	return ret;
}
static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;
	pid_t pid;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;
	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	ret = rdtgroup_move_task(pid, rdtgrp, of);

unlock:
	rdtgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}
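
/*
 * Illustrative userspace view of rdtgroup_tasks_write() (editorial
 * example, hypothetical PID and group name):
 *
 *	# echo 1234 > /sys/fs/resctrl/p1/tasks
 *
 * moves task 1234 into group "p1"; its closid/rmid are updated via
 * task_work so the change takes effect when the task next runs.
 */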
static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
{
	struct task_struct *p, *t;

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
		    (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid))
			seq_printf(s, "%d\n", t->pid);
	}
	rcu_read_unlock();
}
static int rdtgroup_tasks_show(struct kernfs_open_file *of,
			       struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		show_rdt_tasks(rdtgrp, s);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}
static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
				    struct seq_file *seq, void *v)
{
	int len;

	mutex_lock(&rdtgroup_mutex);
	len = seq_buf_used(&last_cmd_status);
	if (len)
		seq_printf(seq, "%.*s", len, last_cmd_status_buf);
	else
		seq_puts(seq, "ok\n");
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}
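
/*
 * Editorial example (not from the original source): after a failed write,
 * e.g. "echo 9999 > tasks" for a nonexistent task, reading
 * info/last_cmd_status returns the buffered diagnostic ("No task 9999")
 * instead of the default "ok".
 */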
static int rdt_num_closids_show(struct kernfs_open_file *of,
				struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_closid);
	return 0;
}

static int rdt_default_ctrl_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%x\n", r->default_ctrl);
	return 0;
}

static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
	return 0;
}

static int rdt_shareable_bits_show(struct kernfs_open_file *of,
				   struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%x\n", r->cache.shareable_bits);
	return 0;
}
/**
 * rdt_bit_usage_show - Display current usage of resources
 *
 * A domain is a shared resource that can now be allocated differently. Here
 * we display the current regions of the domain as an annotated bitmask.
 * For each domain of this resource its allocation bitmask
 * is annotated as below to indicate the current usage of the corresponding bit:
 *   0 - currently unused
 *   X - currently available for sharing and used by software and hardware
 *   H - currently used by hardware only but available for software use
 *   S - currently used and shareable by software only
 *   E - currently used exclusively by one resource group
 *   P - currently pseudo-locked by one resource group
 */
static int rdt_bit_usage_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	u32 sw_shareable = 0, hw_shareable = 0;
	u32 exclusive = 0, pseudo_locked = 0;
	struct rdt_domain *dom;
	int i, hwb, swb, excl, psl;
	enum rdtgrp_mode mode;
	bool sep = false;
	u32 *ctrl;

	mutex_lock(&rdtgroup_mutex);
	hw_shareable = r->cache.shareable_bits;
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_putc(seq, ';');
		ctrl = dom->ctrl_val;
		sw_shareable = 0;
		exclusive = 0;
		seq_printf(seq, "%d=", dom->id);
		for (i = 0; i < closids_supported(); i++, ctrl++) {
			if (!closid_allocated(i))
				continue;
			mode = rdtgroup_mode_by_closid(i);
			switch (mode) {
			case RDT_MODE_SHAREABLE:
				sw_shareable |= *ctrl;
				break;
			case RDT_MODE_EXCLUSIVE:
				exclusive |= *ctrl;
				break;
			case RDT_MODE_PSEUDO_LOCKSETUP:
				/*
				 * RDT_MODE_PSEUDO_LOCKSETUP is possible
				 * here but not included since the CBM
				 * associated with this CLOSID in this mode
				 * is not initialized and no task or cpu can be
				 * assigned this CLOSID.
				 */
				break;
			case RDT_MODE_PSEUDO_LOCKED:
			case RDT_NUM_MODES:
				WARN(1,
				     "invalid mode for closid %d\n", i);
				break;
			}
		}
		for (i = r->cache.cbm_len - 1; i >= 0; i--) {
			pseudo_locked = dom->plr ? dom->plr->cbm : 0;
			hwb = test_bit(i, (unsigned long *)&hw_shareable);
			swb = test_bit(i, (unsigned long *)&sw_shareable);
			excl = test_bit(i, (unsigned long *)&exclusive);
			psl = test_bit(i, (unsigned long *)&pseudo_locked);
			if (hwb && swb)
				seq_putc(seq, 'X');
			else if (hwb && !swb)
				seq_putc(seq, 'H');
			else if (!hwb && swb)
				seq_putc(seq, 'S');
			else if (excl)
				seq_putc(seq, 'E');
			else if (psl)
				seq_putc(seq, 'P');
			else /* Unused bits remain */
				seq_putc(seq, '0');
		}
		sep = true;
	}
	seq_putc(seq, '\n');
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}
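
/*
 * Illustrative bit_usage output (editorial example with hypothetical
 * values): for an 8-bit CBM where bits 0-1 are hardware-shareable and in
 * software use, bits 2-4 exclusive to one group, and bits 5-7 unused,
 * domain 0 prints:
 *
 *	0=000EEEXX
 *
 * Bits print from the most significant bit down, matching the loop above.
 */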
static int rdt_min_bw_show(struct kernfs_open_file *of,
			   struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.min_bw);
	return 0;
}

static int rdt_num_rmids_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_rmid);

	return 0;
}

static int rdt_mon_features_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct mon_evt *mevt;

	list_for_each_entry(mevt, &r->evt_list, list)
		seq_printf(seq, "%s\n", mevt->name);

	return 0;
}

static int rdt_bw_gran_show(struct kernfs_open_file *of,
			    struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.bw_gran);
	return 0;
}

static int rdt_delay_linear_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.delay_linear);
	return 0;
}

static int max_threshold_occ_show(struct kernfs_open_file *of,
				  struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale);

	return 0;
}
static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
				       char *buf, size_t nbytes, loff_t off)
{
	struct rdt_resource *r = of->kn->parent->priv;
	unsigned int bytes;
	int ret;

	ret = kstrtouint(buf, 0, &bytes);
	if (ret)
		return ret;

	if (bytes > (boot_cpu_data.x86_cache_size * 1024))
		return -EINVAL;

	resctrl_cqm_threshold = bytes / r->mon_scale;

	return nbytes;
}
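
/*
 * Worked example for the conversion above (editorial, hypothetical
 * numbers): with a mon_scale of 65536 bytes per counter unit, writing
 * 1048576 (1 MiB) to max_threshold_occupancy stores a threshold of
 * 1048576 / 65536 = 16 counter units; reads multiply back by mon_scale.
 */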
/**
 * rdtgroup_mode_show - Display mode of this resource group
 */
static int rdtgroup_mode_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));

	rdtgroup_kn_unlock(of->kn);
	return 0;
}
/**
 * rdt_cdp_peer_get - Retrieve CDP peer if it exists
 * @r: RDT resource to which RDT domain @d belongs
 * @d: Cache instance for which a CDP peer is requested
 * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer)
 *	   Used to return the result.
 * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer)
 *	   Used to return the result.
 *
 * RDT resources are managed independently and by extension the RDT domains
 * (RDT resource instances) are managed independently also. The Code and
 * Data Prioritization (CDP) RDT resources, while managed independently,
 * could refer to the same underlying hardware. For example,
 * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache.
 *
 * When provided with an RDT resource @r and an instance of that RDT
 * resource @d rdt_cdp_peer_get() will return if there is a peer RDT
 * resource and the exact instance that shares the same hardware.
 *
 * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists.
 *	   If a CDP peer was found, @r_cdp will point to the peer RDT resource
 *	   and @d_cdp will point to the peer RDT domain.
 */
static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
			    struct rdt_resource **r_cdp,
			    struct rdt_domain **d_cdp)
{
	struct rdt_resource *_r_cdp = NULL;
	struct rdt_domain *_d_cdp = NULL;
	int ret = 0;

	switch (r->rid) {
	case RDT_RESOURCE_L3DATA:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE];
		break;
	case RDT_RESOURCE_L3CODE:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L3DATA];
		break;
	case RDT_RESOURCE_L2DATA:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L2CODE];
		break;
	case RDT_RESOURCE_L2CODE:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L2DATA];
		break;
	default:
		ret = -ENOENT;
		goto out;
	}

	/*
	 * When a new CPU comes online and CDP is enabled then the new
	 * RDT domains (if any) associated with both CDP RDT resources
	 * are added in the same CPU online routine while the
	 * rdtgroup_mutex is held. It should thus not happen for one
	 * RDT domain to exist and be associated with its RDT CDP
	 * resource but there is no RDT domain associated with the
	 * peer RDT CDP resource. Hence the WARN.
	 */
	_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
	if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
		_r_cdp = NULL;
		ret = -EINVAL;
	}

out:
	*r_cdp = _r_cdp;
	*d_cdp = _d_cdp;

	return ret;
}
/**
 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
 * @r: Resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Checks if provided @cbm intended to be used for @closid on domain
 * @d overlaps with any other closids or other hardware usage associated
 * with this domain. If @exclusive is true then only overlaps with
 * resource groups in exclusive mode will be considered. If @exclusive
 * is false then overlaps with any resource group or hardware entities
 * will be considered.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 *
 * Return: false if CBM does not overlap, true if it does.
 */
static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
				    unsigned long cbm, int closid, bool exclusive)
{
	enum rdtgrp_mode mode;
	unsigned long ctrl_b;
	u32 *ctrl;
	int i;

	/* Check for any overlap with regions used by hardware directly */
	if (!exclusive) {
		ctrl_b = r->cache.shareable_bits;
		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
			return true;
	}

	/* Check for overlap with other resource groups */
	ctrl = d->ctrl_val;
	for (i = 0; i < closids_supported(); i++, ctrl++) {
		ctrl_b = *ctrl;
		mode = rdtgroup_mode_by_closid(i);
		if (closid_allocated(i) && i != closid &&
		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
				if (exclusive) {
					if (mode == RDT_MODE_EXCLUSIVE)
						return true;
					continue;
				}
				return true;
			}
		}
	}

	return false;
}
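
/*
 * Editorial example of the overlap test above (hypothetical bitmasks): a
 * candidate cbm of 0b00111100 for closid 2 intersects a closid 1 CBM of
 * 0b00001111 on bits 2-3. With @exclusive false that alone returns true;
 * with @exclusive true it only counts if closid 1 is in exclusive mode.
 */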
/**
 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
 * @r: Resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Resources that can be allocated using a CBM can use the CBM to control
 * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
 * for overlap. Overlap test is not limited to the specific resource for
 * which the CBM is intended though - when dealing with CDP resources that
 * share the underlying hardware the overlap check should be performed on
 * the CDP resource sharing the hardware also.
 *
 * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
 * overlap test.
 *
 * Return: true if CBM overlap detected, false if there is no overlap
 */
bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
			   unsigned long cbm, int closid, bool exclusive)
{
	struct rdt_resource *r_cdp;
	struct rdt_domain *d_cdp;

	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive))
		return true;

	if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0)
		return false;

	return __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive);
}
/**
 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
 *
 * An exclusive resource group implies that there should be no sharing of
 * its allocated resources. At the time this group is considered to be
 * exclusive this test can determine if its current schemata supports this
 * setting by testing for overlap with all other resource groups.
 *
 * Return: true if resource group can be exclusive, false if there is overlap
 * with allocations of other resource groups and thus this resource group
 * cannot be exclusive.
 */
static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{
	int closid = rdtgrp->closid;
	struct rdt_resource *r;
	bool has_cache = false;
	struct rdt_domain *d;

	for_each_alloc_enabled_rdt_resource(r) {
		if (r->rid == RDT_RESOURCE_MBA)
			continue;
		has_cache = true;
		list_for_each_entry(d, &r->domains, list) {
			if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
						  rdtgrp->closid, false)) {
				rdt_last_cmd_puts("Schemata overlaps\n");
				return false;
			}
		}
	}

	if (!has_cache) {
		rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
		return false;
	}

	return true;
}
/**
 * rdtgroup_mode_write - Modify the resource group's mode
 */
static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	enum rdtgrp_mode mode;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	rdt_last_cmd_clear();

	mode = rdtgrp->mode;

	if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
	    (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
	    (!strcmp(buf, "pseudo-locksetup") &&
	     mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
	    (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
		goto out;

	if (mode == RDT_MODE_PSEUDO_LOCKED) {
		rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(buf, "shareable")) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_SHAREABLE;
	} else if (!strcmp(buf, "exclusive")) {
		if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
			ret = -EINVAL;
			goto out;
		}
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_EXCLUSIVE;
	} else if (!strcmp(buf, "pseudo-locksetup")) {
		ret = rdtgroup_locksetup_enter(rdtgrp);
		if (ret)
			goto out;
		rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
	} else {
		rdt_last_cmd_puts("Unknown or unsupported mode\n");
		ret = -EINVAL;
	}

out:
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}
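
/*
 * Illustrative userspace view of rdtgroup_mode_write() (editorial example,
 * hypothetical group "p1"):
 *
 *	# echo exclusive > /sys/fs/resctrl/p1/mode
 *
 * succeeds only if rdtgroup_mode_test_exclusive() finds no schemata
 * overlap with any other resource group.
 */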
/**
 * rdtgroup_cbm_to_size - Translate CBM to size in bytes
 * @r: RDT resource to which @d belongs.
 * @d: RDT domain instance.
 * @cbm: bitmask for which the size should be computed.
 *
 * The bitmask provided associated with the RDT domain instance @d will be
 * translated into how many bytes it represents. The size in bytes is
 * computed by first dividing the total cache size by the CBM length to
 * determine how many bytes each bit in the bitmask represents. The result
 * is multiplied with the number of bits set in the bitmask.
 *
 * @cbm is unsigned long, even if only 32 bits are used to make the
 * bitmap functions work correctly.
 */
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
				  struct rdt_domain *d, unsigned long cbm)
{
	struct cpu_cacheinfo *ci;
	unsigned int size = 0;
	int num_b, i;

	num_b = bitmap_weight(&cbm, r->cache.cbm_len);
	ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == r->cache_level) {
			size = ci->info_list[i].size / r->cache.cbm_len * num_b;
			break;
		}
	}

	return size;
}
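
/*
 * Worked example for rdtgroup_cbm_to_size() (editorial, hypothetical
 * hardware): a 20 MiB L3 cache with a 20-bit CBM gives 20 MiB / 20 =
 * 1 MiB per bit, so a CBM with 5 bits set reports 5 MiB.
 */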
/**
 * rdtgroup_size_show - Display size in bytes of allocated regions
 *
 * The "size" file mirrors the layout of the "schemata" file, printing the
 * size in bytes of each region instead of the capacity bitmask.
 */
static int rdtgroup_size_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	struct rdt_domain *d;
	unsigned int size;
	int ret = 0;
	bool sep;
	u32 ctrl;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		if (!rdtgrp->plr->d) {
			rdt_last_cmd_clear();
			rdt_last_cmd_puts("Cache domain offline\n");
			ret = -ENODEV;
		} else {
			seq_printf(s, "%*s:", max_name_width,
				   rdtgrp->plr->r->name);
			size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
						    rdtgrp->plr->d,
						    rdtgrp->plr->cbm);
			seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
		}
		goto out;
	}

	for_each_alloc_enabled_rdt_resource(r) {
		sep = false;
		seq_printf(s, "%*s:", max_name_width, r->name);
		list_for_each_entry(d, &r->domains, list) {
			if (sep)
				seq_putc(s, ';');
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				size = 0;
			} else {
				ctrl = (!is_mba_sc(r) ?
						d->ctrl_val[rdtgrp->closid] :
						d->mbps_val[rdtgrp->closid]);
				if (r->rid == RDT_RESOURCE_MBA)
					size = ctrl;
				else
					size = rdtgroup_cbm_to_size(r, d, ctrl);
			}
			seq_printf(s, "%d=%u", d->id, size);
			sep = true;
		}
		seq_putc(s, '\n');
	}

out:
	rdtgroup_kn_unlock(of->kn);

	return ret;
}
/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
	{
		.name		= "last_cmd_status",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_last_cmd_status_show,
		.fflags		= RF_TOP_INFO,
	},
	{
		.name		= "num_closids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_closids_show,
		.fflags		= RF_CTRL_INFO,
	},
	{
		.name		= "mon_features",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_mon_features_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "num_rmids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_rmids_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "cbm_mask",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_default_ctrl_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_cbm_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_cbm_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "shareable_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_shareable_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "bit_usage",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bit_usage_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_bandwidth",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_bw_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "bandwidth_gran",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bw_gran_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "delay_linear",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_delay_linear_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "max_threshold_occupancy",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= max_threshold_occ_write,
		.seq_show	= max_threshold_occ_show,
		.fflags		= RF_MON_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "cpus",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "cpus_list",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.flags		= RFTYPE_FLAGS_CPUS_LIST,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "tasks",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_tasks_write,
		.seq_show	= rdtgroup_tasks_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "schemata",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_schemata_write,
		.seq_show	= rdtgroup_schemata_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "mode",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_mode_write,
		.seq_show	= rdtgroup_mode_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "size",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdtgroup_size_show,
		.fflags		= RF_CTRL_BASE,
	},
};
static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
{
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	lockdep_assert_held(&rdtgroup_mutex);

	for (rft = rfts; rft < rfts + len; rft++) {
		if ((fflags & rft->fflags) == rft->fflags) {
			ret = rdtgroup_add_file(kn, rft);
			if (ret)
				goto error;
		}
	}

	return 0;
error:
	pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
	while (--rft >= rfts) {
		if ((fflags & rft->fflags) == rft->fflags)
			kernfs_remove_by_name(kn, rft->name);
	}
	return ret;
}
/**
 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 *
 * The permissions of named resctrl file, directory, or link are modified
 * to not allow read, write, or execute by any user.
 *
 * WARNING: This function is intended to communicate to the user that the
 * resctrl file has been locked down - that it is not relevant to the
 * particular state the system finds itself in. It should not be relied
 * on to protect from user access because after the file's permissions
 * are restricted the user can still change the permissions using chmod
 * from the command line.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn;
	int ret = 0;

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		iattr.ia_mode = S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode = S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode = S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}
/**
 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 * @mask: Mask of permissions that should be restored
 *
 * Restore the permissions of the named file. If @name is a directory the
 * permissions of its parent will be used.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
			     umode_t mask)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn, *parent;
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (!strcmp(rft->name, name))
			iattr.ia_mode = rft->mode & mask;
	}

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		parent = kernfs_get_parent(kn);
		if (parent) {
			iattr.ia_mode |= parent->mode;
			kernfs_put(parent);
		}
		iattr.ia_mode |= S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode |= S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode |= S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}
static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
				      unsigned long fflags)
{
	struct kernfs_node *kn_subdir;
	int ret;

	kn_subdir = kernfs_create_dir(kn_info, name,
				      kn_info->mode, r);
	if (IS_ERR(kn_subdir))
		return PTR_ERR(kn_subdir);

	kernfs_get(kn_subdir);
	ret = rdtgroup_kn_set_ugid(kn_subdir);
	if (ret)
		return ret;

	ret = rdtgroup_add_files(kn_subdir, fflags);
	if (!ret)
		kernfs_activate(kn_subdir);

	return ret;
}
static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
{
	struct rdt_resource *r;
	unsigned long fflags;
	char name[32];
	int ret;

	/* create the directory */
	kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
	if (IS_ERR(kn_info))
		return PTR_ERR(kn_info);
	kernfs_get(kn_info);

	ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
	if (ret)
		goto out_destroy;

	for_each_alloc_enabled_rdt_resource(r) {
		fflags = r->fflags | RF_CTRL_INFO;
		ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags);
		if (ret)
			goto out_destroy;
	}

	for_each_mon_enabled_rdt_resource(r) {
		fflags = r->fflags | RF_MON_INFO;
		sprintf(name, "%s_MON", r->name);
		ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
		if (ret)
			goto out_destroy;
	}

	/*
	 * This extra ref will be put in kernfs_remove() and guarantees
	 * that @rdtgrp->kn is always accessible.
	 */
	kernfs_get(kn_info);

	ret = rdtgroup_kn_set_ugid(kn_info);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn_info);

	return 0;

out_destroy:
	kernfs_remove(kn_info);
	return ret;
}
static int
mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
		    char *name, struct kernfs_node **dest_kn)
{
	struct kernfs_node *kn;
	int ret;

	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	if (dest_kn)
		*dest_kn = kn;

	/*
	 * This extra ref will be put in kernfs_remove() and guarantees
	 * that @rdtgrp->kn is always accessible.
	 */
	kernfs_get(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn);

	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}
static void l3_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}

static void l2_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
}

static inline bool is_mba_linear(void)
{
	return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear;
}
static int set_cache_qos_cfg(int level, bool enable)
{
	void (*update)(void *arg);
	struct rdt_resource *r_l;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int cpu;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	if (level == RDT_RESOURCE_L3)
		update = l3_qos_cfg_update;
	else if (level == RDT_RESOURCE_L2)
		update = l2_qos_cfg_update;
	else
		return -EINVAL;

	r_l = &rdt_resources_all[level];
	list_for_each_entry(d, &r_l->domains, list) {
		/* Pick one CPU from each domain instance to update MSR */
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
	}
	cpu = get_cpu();
	/* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		update(&enable);
	/* Update QOS_CFG MSR on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, update, &enable, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);

	return 0;
}
/*
 * Enable or disable the MBA software controller
 * which helps user specify bandwidth in MBps.
 * MBA software controller is supported only if
 * MBM is supported and MBA is in linear scale.
 */
static int set_mba_sc(bool mba_sc)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA];
	struct rdt_domain *d;

	if (!is_mbm_enabled() || !is_mba_linear() ||
	    mba_sc == is_mba_sc(r))
		return -EINVAL;

	r->membw.mba_sc = mba_sc;
	list_for_each_entry(d, &r->domains, list)
		setup_default_ctrlval(r, d->ctrl_val, d->mbps_val);

	return 0;
}
static int cdp_enable(int level, int data_type, int code_type)
{
	struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
	struct rdt_resource *r_lcode = &rdt_resources_all[code_type];
	struct rdt_resource *r_l = &rdt_resources_all[level];
	int ret;

	if (!r_l->alloc_capable || !r_ldata->alloc_capable ||
	    !r_lcode->alloc_capable)
		return -EINVAL;

	ret = set_cache_qos_cfg(level, true);
	if (!ret) {
		r_l->alloc_enabled = false;
		r_ldata->alloc_enabled = true;
		r_lcode->alloc_enabled = true;
	}
	return ret;
}

static int cdpl3_enable(void)
{
	return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA,
			  RDT_RESOURCE_L3CODE);
}

static int cdpl2_enable(void)
{
	return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA,
			  RDT_RESOURCE_L2CODE);
}
static void cdp_disable(int level, int data_type, int code_type)
{
	struct rdt_resource *r = &rdt_resources_all[level];

	r->alloc_enabled = r->alloc_capable;

	if (rdt_resources_all[data_type].alloc_enabled) {
		rdt_resources_all[data_type].alloc_enabled = false;
		rdt_resources_all[code_type].alloc_enabled = false;
		set_cache_qos_cfg(level, false);
	}
}

static void cdpl3_disable(void)
{
	cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE);
}

static void cdpl2_disable(void)
{
	cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE);
}

static void cdp_disable_all(void)
{
	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
		cdpl3_disable();
	if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
		cdpl2_disable();
}
/*
 * We don't allow rdtgroup directories to be created anywhere
 * except the root directory. Thus when looking for the rdtgroup
 * structure for a kernfs node we are either looking at a directory,
 * in which case the rdtgroup structure is pointed at by the "priv"
 * field, otherwise we have a file, and need only look to the parent
 * to find the rdtgroup.
 */
static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
{
	if (kernfs_type(kn) == KERNFS_DIR) {
		/*
		 * All the resource directories use "kn->priv"
		 * to point to the "struct rdtgroup" for the
		 * resource. "info" and its subdirectories don't
		 * have rdtgroup structures, so return NULL here.
		 */
		if (kn == kn_info || kn->parent == kn_info)
			return NULL;
		else
			return kn->priv;
	} else {
		return kn->parent->priv;
	}
}
struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return NULL;

	atomic_inc(&rdtgrp->waitcount);
	kernfs_break_active_protection(kn);

	mutex_lock(&rdtgroup_mutex);

	/* Was this group deleted while we waited? */
	if (rdtgrp->flags & RDT_DELETED)
		return NULL;

	return rdtgrp;
}
void rdtgroup_kn_unlock(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return;

	mutex_unlock(&rdtgroup_mutex);

	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);
		kernfs_unbreak_active_protection(kn);
		kernfs_put(rdtgrp->kn);
		kfree(rdtgrp);
	} else {
		kernfs_unbreak_active_protection(kn);
	}
}
static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **mon_data_kn);
static int rdt_enable_ctx(struct rdt_fs_context *ctx)
{
	int ret = 0;

	if (ctx->enable_cdpl2)
		ret = cdpl2_enable();

	if (!ret && ctx->enable_cdpl3)
		ret = cdpl3_enable();

	if (!ret && ctx->enable_mba_mbps)
		ret = set_mba_sc(true);

	return ret;
}
static int rdt_get_tree(struct fs_context *fc)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);
	struct rdt_domain *dom;
	struct rdt_resource *r;
	int ret;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);
	/*
	 * resctrl file system can only be mounted once.
	 */
	if (static_branch_unlikely(&rdt_enable_key)) {
		ret = -EBUSY;
		goto out;
	}

	ret = rdt_enable_ctx(ctx);
	if (ret < 0)
		goto out_cdp;

	closid_init();

	ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
	if (ret < 0)
		goto out_mba;

	if (rdt_mon_capable) {
		ret = mongroup_create_dir(rdtgroup_default.kn,
					  NULL, "mon_groups",
					  &kn_mongrp);
		if (ret < 0)
			goto out_info;
		kernfs_get(kn_mongrp);

		ret = mkdir_mondata_all(rdtgroup_default.kn,
					&rdtgroup_default, &kn_mondata);
		if (ret < 0)
			goto out_mongrp;
		kernfs_get(kn_mondata);
		rdtgroup_default.mon.mon_data_kn = kn_mondata;
	}

	ret = rdt_pseudo_lock_init();
	if (ret)
		goto out_mondata;

	ret = kernfs_get_tree(fc);
	if (ret < 0)
		goto out_psl;

	if (rdt_alloc_capable)
		static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
	if (rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_mon_enable_key);

	if (rdt_alloc_capable || rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_enable_key);

	if (is_mbm_enabled()) {
		r = &rdt_resources_all[RDT_RESOURCE_L3];
		list_for_each_entry(dom, &r->domains, list)
			mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
	}

	goto out;

out_psl:
	rdt_pseudo_lock_release();
out_mondata:
	if (rdt_mon_capable)
		kernfs_remove(kn_mondata);
out_mongrp:
	if (rdt_mon_capable)
		kernfs_remove(kn_mongrp);
out_info:
	kernfs_remove(kn_info);
out_mba:
	if (ctx->enable_mba_mbps)
		set_mba_sc(false);
out_cdp:
	cdp_disable_all();
out:
	rdt_last_cmd_clear();
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
	return ret;
}
enum rdt_param {
	Opt_cdp,
	Opt_cdpl2,
	Opt_mba_mbps,
	nr__rdt_params
};

static const struct fs_parameter_spec rdt_param_specs[] = {
	fsparam_flag("cdp",		Opt_cdp),
	fsparam_flag("cdpl2",		Opt_cdpl2),
	fsparam_flag("mba_MBps",	Opt_mba_mbps),
	{}
};

static const struct fs_parameter_description rdt_fs_parameters = {
	.name		= "rdt",
	.specs		= rdt_param_specs,
};
static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, &rdt_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_cdp:
		ctx->enable_cdpl3 = true;
		return 0;
	case Opt_cdpl2:
		ctx->enable_cdpl2 = true;
		return 0;
	case Opt_mba_mbps:
		if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
			return -EINVAL;
		ctx->enable_mba_mbps = true;
		return 0;
	}

	return -EINVAL;
}
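
/*
 * Editorial example (not from the original source): the flags parsed above
 * map onto mount options, e.g.:
 *
 *	# mount -t resctrl -o cdp resctrl /sys/fs/resctrl
 *
 * sets ctx->enable_cdpl3, which rdt_enable_ctx() later turns into
 * cdpl3_enable() during rdt_get_tree().
 */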
static void rdt_fs_context_free(struct fs_context *fc)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);

	kernfs_free_fs_context(fc);
	kfree(ctx);
}

static const struct fs_context_operations rdt_fs_context_ops = {
	.free		= rdt_fs_context_free,
	.parse_param	= rdt_parse_param,
	.get_tree	= rdt_get_tree,
};
static int rdt_init_fs_context(struct fs_context *fc)
{
	struct rdt_fs_context *ctx;

	ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->kfc.root = rdt_root;
	ctx->kfc.magic = RDTGROUP_SUPER_MAGIC;
	fc->fs_private = &ctx->kfc;
	fc->ops = &rdt_fs_context_ops;
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(&init_user_ns);
	fc->global = true;
	return 0;
}
static int reset_all_ctrls(struct rdt_resource *r)
{
	struct msr_param msr_param;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int i, cpu;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	msr_param.res = r;
	msr_param.low = 0;
	msr_param.high = r->num_closid;

	/*
	 * Disable resource control for this resource by setting all
	 * CBMs in all domains to the maximum mask value. Pick one CPU
	 * from each domain to update the MSRs below.
	 */
	list_for_each_entry(d, &r->domains, list) {
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);

		for (i = 0; i < r->num_closid; i++)
			d->ctrl_val[i] = r->default_ctrl;
	}
	cpu = get_cpu();
	/* Update CBM on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		rdt_ctrl_update(&msr_param);
	/* Update CBM on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);

	return 0;
}
static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_alloc_capable &&
	       (r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
}

static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_mon_capable &&
	       (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
}
/*
 * Move tasks from one to the other group. If @from is NULL, then all tasks
 * in the systems are moved unconditionally (used for teardown).
 *
 * If @mask is not NULL the cpus on which moved tasks are running are set
 * in that mask so the update smp function call is restricted to affected
 * CPUs.
 */
static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
				 struct cpumask *mask)
{
	struct task_struct *p, *t;

	read_lock(&tasklist_lock);
	for_each_process_thread(p, t) {
		if (!from || is_closid_match(t, from) ||
		    is_rmid_match(t, from)) {
			t->closid = to->closid;
			t->rmid = to->mon.rmid;

#ifdef CONFIG_SMP
			/*
			 * This is safe on x86 w/o barriers as the ordering
			 * of writing to task_cpu() and t->on_cpu is
			 * reverse to the reading here. The detection is
			 * inaccurate as tasks might move or schedule
			 * before the smp function call takes place. In
			 * such a case the function call is pointless, but
			 * there is no other side effect.
			 */
			if (mask && t->on_cpu)
				cpumask_set_cpu(task_cpu(t), mask);
#endif
		}
	}
	read_unlock(&tasklist_lock);
}
static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
{
	struct rdtgroup *sentry, *stmp;
	struct list_head *head;

	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
		free_rmid(sentry->mon.rmid);
		list_del(&sentry->mon.crdtgrp_list);
		kfree(sentry);
	}
}
/*
 * Forcibly remove all of subdirectories under root.
 */
static void rmdir_all_sub(void)
{
	struct rdtgroup *rdtgrp, *tmp;

	/* Move all tasks to the default resource group */
	rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);

	list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
		/* Free any child rmids */
		free_all_child_rdtgrp(rdtgrp);

		/* Remove each rdtgroup other than root */
		if (rdtgrp == &rdtgroup_default)
			continue;

		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);

		/*
		 * Give any CPUs back to the default group. We cannot copy
		 * cpu_online_mask because a CPU might have executed the
		 * offline callback already, but is still marked online.
		 */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

		free_rmid(rdtgrp->mon.rmid);

		kernfs_remove(rdtgrp->kn);
		list_del(&rdtgrp->rdtgroup_list);
		kfree(rdtgrp);
	}
	/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
	update_closid_rmid(cpu_online_mask, &rdtgroup_default);

	kernfs_remove(kn_info);
	kernfs_remove(kn_mongrp);
	kernfs_remove(kn_mondata);
}
static void rdt_kill_sb(struct super_block *sb)
{
	struct rdt_resource *r;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	set_mba_sc(false);

	/* Put everything back to default values. */
	for_each_alloc_enabled_rdt_resource(r)
		reset_all_ctrls(r);
	cdp_disable_all();
	rmdir_all_sub();
	rdt_pseudo_lock_release();
	rdtgroup_default.mode = RDT_MODE_SHAREABLE;
	static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
	static_branch_disable_cpuslocked(&rdt_mon_enable_key);
	static_branch_disable_cpuslocked(&rdt_enable_key);
	kernfs_kill_sb(sb);
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
}
static struct file_system_type rdt_fs_type = {
	.name			= "resctrl",
	.init_fs_context	= rdt_init_fs_context,
	.parameters		= &rdt_fs_parameters,
	.kill_sb		= rdt_kill_sb,
};
static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
		       void *priv)
{
	struct kernfs_node *kn;
	int ret = 0;

	kn = __kernfs_create_file(parent_kn, name, 0444,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
				  &kf_mondata_ops, priv, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return ret;
}
/*
 * Remove all subdirectories of mon_data of ctrl_mon groups
 * and monitor groups with given domain id.
 */
void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id)
{
	struct rdtgroup *prgrp, *crgrp;
	char name[32];

	if (!r->mon_enabled)
		return;

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		sprintf(name, "mon_%s_%02d", r->name, dom_id);
		kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);

		list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
			kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
	}
}
static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
				struct rdt_domain *d,
				struct rdt_resource *r, struct rdtgroup *prgrp)
{
	union mon_data_bits priv;
	struct kernfs_node *kn;
	struct mon_evt *mevt;
	struct rmid_read rr;
	char name[32];
	int ret;

	sprintf(name, "mon_%s_%02d", r->name, d->id);
	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	/*
	 * This extra ref will be put in kernfs_remove() and guarantees
	 * that kn is always accessible.
	 */
	kernfs_get(kn);
	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	if (WARN_ON(list_empty(&r->evt_list))) {
		ret = -EPERM;
		goto out_destroy;
	}

	priv.u.rid = r->rid;
	priv.u.domid = d->id;
	list_for_each_entry(mevt, &r->evt_list, list) {
		priv.u.evtid = mevt->evtid;
		ret = mon_addfile(kn, mevt->name, priv.priv);
		if (ret)
			goto out_destroy;

		if (is_mbm_event(mevt->evtid))
			mon_event_read(&rr, d, prgrp, mevt->evtid, true);
	}
	kernfs_activate(kn);
	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}
/*
 * Add all subdirectories of mon_data for "ctrl_mon" groups
 * and "monitor" groups with given domain id.
 */
void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
				    struct rdt_domain *d)
{
	struct kernfs_node *parent_kn;
	struct rdtgroup *prgrp, *crgrp;
	struct list_head *head;

	if (!r->mon_enabled)
		return;

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		parent_kn = prgrp->mon.mon_data_kn;
		mkdir_mondata_subdir(parent_kn, d, r, prgrp);

		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			parent_kn = crgrp->mon.mon_data_kn;
			mkdir_mondata_subdir(parent_kn, d, r, crgrp);
		}
	}
}
static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
				       struct rdt_resource *r,
				       struct rdtgroup *prgrp)
{
	struct rdt_domain *dom;
	int ret;

	list_for_each_entry(dom, &r->domains, list) {
		ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * This creates a directory mon_data which contains the monitored data.
 *
 * mon_data has one directory for each domain, named in the format
 * mon_<domain_name>_<domain_id>. For ex: A mon_data
 * with L3 domain looks as below:
 * ./mon_data:
 * mon_L3_00
 * mon_L3_01
 * mon_L3_02
 * ...
 *
 * Each domain directory has one file per event:
 * ./mon_L3_00/:
 * llc_occupancy
 *
 */
static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **dest_kn)
{
	struct rdt_resource *r;
	struct kernfs_node *kn;
	int ret;

	/*
	 * Create the mon_data directory first.
	 */
	ret = mongroup_create_dir(parent_kn, NULL, "mon_data", &kn);
	if (ret)
		return ret;

	if (dest_kn)
		*dest_kn = kn;

	/*
	 * Create the subdirectories for each domain. Note that all events
	 * in a domain like L3 are grouped into a resource whose domain is L3
	 */
	for_each_mon_enabled_rdt_resource(r) {
		ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
		if (ret)
			goto out_destroy;
	}

	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}
/**
 * cbm_ensure_valid - Enforce validity on provided CBM
 * @_val:	Candidate CBM
 * @r:		RDT resource to which the CBM belongs
 *
 * The provided CBM represents all cache portions available for use. This
 * may be represented by a bitmap that does not consist of contiguous ones
 * and thus be an invalid CBM.
 * Here the provided CBM is forced to be a valid CBM by only considering
 * the first set of contiguous bits as valid and clearing all other bits.
 * The intention here is to provide a valid default CBM with which a new
 * resource group is initialized. The user can follow this with a
 * modification to the CBM if the default does not satisfy the
 * requirements.
 */
static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
{
        /*
         * Convert the u32 _val to an unsigned long required by all the bit
         * operations within this function. No more than 32 bits of this
         * converted value can be accessed because all bit operations are
         * additionally provided with cbm_len that is initialized during
         * hardware enumeration using five bits from the EAX register and
         * thus never can exceed 32 bits.
         */
        unsigned long *val = (unsigned long *)_val;
        unsigned int cbm_len = r->cache.cbm_len;
        unsigned long first_bit, zero_bit;

        if (*val == 0)
                return;

        first_bit = find_first_bit(val, cbm_len);
        zero_bit = find_next_zero_bit(val, cbm_len, first_bit);

        /* Clear any remaining bits to ensure contiguous region */
        bitmap_clear(val, zero_bit, cbm_len - zero_bit);
}
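
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): demonstrates the effect of cbm_ensure_valid() on a
 * non-contiguous candidate CBM. Assumes a resource whose cache.cbm_len
 * is 11, as on many L3 CAT implementations.
 */
static void __init cbm_ensure_valid_example(struct rdt_resource *r)
{
        u32 cbm = 0x5ff;        /* 0b101_1111_1111: bit 9 is clear */

        cbm_ensure_valid(&cbm, r);

        /* Only the first contiguous run (bits 0-8) survives */
        WARN_ON(cbm != 0x1ff);
}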
/*
 * Initialize cache resources per RDT domain
 *
 * Set the RDT domain up to start off with all usable allocations. That is,
 * all shareable and unused bits. All-zero CBM is invalid.
 */
static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
                                 u32 closid)
{
        struct rdt_resource *r_cdp = NULL;
        struct rdt_domain *d_cdp = NULL;
        u32 used_b = 0, unused_b = 0;
        unsigned long tmp_cbm;
        enum rdtgrp_mode mode;
        u32 peer_ctl, *ctrl;
        int i;

        rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
        d->have_new_ctrl = false;
        d->new_ctrl = r->cache.shareable_bits;
        used_b = r->cache.shareable_bits;
        ctrl = d->ctrl_val;
        for (i = 0; i < closids_supported(); i++, ctrl++) {
                if (closid_allocated(i) && i != closid) {
                        mode = rdtgroup_mode_by_closid(i);
                        if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
                                break;
                        /*
                         * If CDP is active include peer domain's
                         * usage to ensure there is no overlap
                         * with an exclusive group.
                         */
                        if (d_cdp)
                                peer_ctl = d_cdp->ctrl_val[i];
                        else
                                peer_ctl = 0;
                        used_b |= *ctrl | peer_ctl;
                        if (mode == RDT_MODE_SHAREABLE)
                                d->new_ctrl |= *ctrl | peer_ctl;
                }
        }
        if (d->plr && d->plr->cbm > 0)
                used_b |= d->plr->cbm;
        unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
        unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
        d->new_ctrl |= unused_b;
        /*
         * Force the initial CBM to be valid, user can
         * modify the CBM based on system availability.
         */
        cbm_ensure_valid(&d->new_ctrl, r);
        /*
         * Assign the u32 CBM to an unsigned long to ensure that
         * bitmap_weight() does not access out-of-bound memory.
         */
        tmp_cbm = d->new_ctrl;
        if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
                rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id);
                return -ENOSPC;
        }
        d->have_new_ctrl = true;
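
        return 0;
}

/*
 * Worked example (illustrative, not from the original source): with an
 * 8-bit CBM, shareable_bits = 0x03, one other allocated CLOSID using
 * 0x30 in exclusive mode, and no CDP peer:
 *
 *	used_b   = 0x03 | 0x30            = 0x33
 *	unused_b = 0x33 ^ (BIT_MASK(8)-1) = 0xcc
 *	new_ctrl = 0x03 | 0xcc            = 0xcf
 *
 * cbm_ensure_valid() then keeps only the first contiguous run, so the
 * new group starts with new_ctrl = 0x0f.
 */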
/*
 * Initialize cache resources with default values.
 *
 * A new RDT group is being created on an allocation capable (CAT)
 * supporting system. Set this group up to start off with all usable
 * allocations.
 *
 * If there are no more shareable bits available on any domain then
 * the entire allocation will fail.
 */
static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
{
        struct rdt_domain *d;
        int ret;

        list_for_each_entry(d, &r->domains, list) {
                ret = __init_one_rdt_domain(d, r, closid);
                if (ret < 0)
                        return ret;
        }

        return 0;
}
/* Initialize MBA resource with default values. */
static void rdtgroup_init_mba(struct rdt_resource *r)
{
        struct rdt_domain *d;

        list_for_each_entry(d, &r->domains, list) {
                d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
                d->have_new_ctrl = true;
        }
}
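
/*
 * Example (illustrative): with MBA in its usual percentage mode,
 * default_ctrl is typically 100, so each domain of a new group starts
 * unthrottled; with the "mba_MBps" software controller active, the
 * MBA_MAX_MBPS sentinel is used as the starting value instead.
 */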
/* Initialize the RDT group's allocations. */
static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
        struct rdt_resource *r;
        int ret;

        for_each_alloc_enabled_rdt_resource(r) {
                if (r->rid == RDT_RESOURCE_MBA) {
                        rdtgroup_init_mba(r);
                } else {
                        ret = rdtgroup_init_cat(r, rdtgrp->closid);
                        if (ret < 0)
                                return ret;
                }

                ret = update_domains(r, rdtgrp->closid);
                if (ret < 0) {
                        rdt_last_cmd_puts("Failed to initialize allocations\n");
                        return ret;
                }
        }

        rdtgrp->mode = RDT_MODE_SHAREABLE;

        return 0;
}
static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
                             struct kernfs_node *prgrp_kn,
                             const char *name, umode_t mode,
                             enum rdt_group_type rtype, struct rdtgroup **r)
{
        struct rdtgroup *prdtgrp, *rdtgrp;
        struct kernfs_node *kn;
        uint files = 0;
        int ret;

        prdtgrp = rdtgroup_kn_lock_live(prgrp_kn);
        rdt_last_cmd_clear();
        if (!prdtgrp) {
                ret = -ENODEV;
                rdt_last_cmd_puts("Directory was removed\n");
                goto out_unlock;
        }

        if (rtype == RDTMON_GROUP &&
            (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
             prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
                ret = -EINVAL;
                rdt_last_cmd_puts("Pseudo-locking in progress\n");
                goto out_unlock;
        }

        /* allocate the rdtgroup. */
        rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
        if (!rdtgrp) {
                ret = -ENOSPC;
                rdt_last_cmd_puts("Kernel out of memory\n");
                goto out_unlock;
        }
        *r = rdtgrp;
        rdtgrp->mon.parent = prdtgrp;
        rdtgrp->type = rtype;
        INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list);

        /* kernfs creates the directory for rdtgrp */
        kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
        if (IS_ERR(kn)) {
                ret = PTR_ERR(kn);
                rdt_last_cmd_puts("kernfs create error\n");
                goto out_free_rgrp;
        }
        rdtgrp->kn = kn;

        /*
         * kernfs_remove() will drop the reference count on "kn" which
         * will free it. But we still need it to stick around for the
         * rdtgroup_kn_unlock(kn) call below. Take one extra reference
         * here, which will be dropped inside rdtgroup_kn_unlock().
         */
        kernfs_get(kn);

        ret = rdtgroup_kn_set_ugid(kn);
        if (ret) {
                rdt_last_cmd_puts("kernfs perm error\n");
                goto out_destroy;
        }

        files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype);
        ret = rdtgroup_add_files(kn, files);
        if (ret) {
                rdt_last_cmd_puts("kernfs fill error\n");
                goto out_destroy;
        }

        if (rdt_mon_capable) {
                ret = alloc_rmid();
                if (ret < 0) {
                        rdt_last_cmd_puts("Out of RMIDs\n");
                        goto out_destroy;
                }
                rdtgrp->mon.rmid = ret;

                ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
                if (ret) {
                        rdt_last_cmd_puts("kernfs subdir error\n");
                        goto out_idfree;
                }
        }
        kernfs_activate(kn);

        /*
         * The caller unlocks the prgrp_kn upon success.
         */
        return 0;

out_idfree:
        free_rmid(rdtgrp->mon.rmid);
out_destroy:
        kernfs_remove(rdtgrp->kn);
out_free_rgrp:
        kfree(rdtgrp);
out_unlock:
        rdtgroup_kn_unlock(prgrp_kn);
        return ret;
}
static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
{
        kernfs_remove(rgrp->kn);
        free_rmid(rgrp->mon.rmid);
}
/*
 * Create a monitor group under the "mon_groups" directory of a control
 * and monitor group (ctrl_mon). This is a resource group used to
 * monitor a subset of the tasks and cpus in its parent ctrl_mon group.
 */
static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
                              struct kernfs_node *prgrp_kn,
                              const char *name, umode_t mode)
{
        struct rdtgroup *rdtgrp, *prgrp;
        int ret;

        ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTMON_GROUP,
                                &rdtgrp);
        if (ret)
                return ret;

        prgrp = rdtgrp->mon.parent;
        rdtgrp->closid = prgrp->closid;

        /*
         * Add the rdtgrp to the list of rdtgrps the parent
         * ctrl_mon group has to track.
         */
        list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);

        rdtgroup_kn_unlock(prgrp_kn);
        return ret;
}
/*
 * These are rdtgroups created under the root directory. They can be
 * used to allocate and monitor resources.
 */
static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
                                   struct kernfs_node *prgrp_kn,
                                   const char *name, umode_t mode)
{
        struct rdtgroup *rdtgrp;
        struct kernfs_node *kn;
        u32 closid;
        int ret;

        ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTCTRL_GROUP,
                                &rdtgrp);
        if (ret)
                return ret;

        kn = rdtgrp->kn;
        ret = closid_alloc();
        if (ret < 0) {
                rdt_last_cmd_puts("Out of CLOSIDs\n");
                goto out_common_fail;
        }
        closid = ret;
        ret = 0;

        rdtgrp->closid = closid;
        ret = rdtgroup_init_alloc(rdtgrp);
        if (ret < 0)
                goto out_id_free;

        list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);

        if (rdt_mon_capable) {
                /*
                 * Create an empty mon_groups directory to hold the subset
                 * of tasks and cpus to monitor.
                 */
                ret = mongroup_create_dir(kn, NULL, "mon_groups", NULL);
                if (ret) {
                        rdt_last_cmd_puts("kernfs subdir error\n");
                        goto out_del_list;
                }
        }

        goto out_unlock;

out_del_list:
        list_del(&rdtgrp->rdtgroup_list);
out_id_free:
        closid_free(closid);
out_common_fail:
        mkdir_rdt_prepare_clean(rdtgrp);
out_unlock:
        rdtgroup_kn_unlock(prgrp_kn);
        return ret;
}
/*
 * We allow creating mon groups only within a directory called "mon_groups"
 * which is present in every ctrl_mon group. Check if this is a valid
 * "mon_groups" directory.
 *
 * 1. The directory should be named "mon_groups".
 * 2. The mon group itself should "not" be named "mon_groups".
 *   This makes sure the "mon_groups" directory always has a ctrl_mon
 *   group as its parent.
 */
static bool is_mon_groups(struct kernfs_node *kn, const char *name)
{
        return (!strcmp(kn->name, "mon_groups") &&
                strcmp(name, "mon_groups"));
}
static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
                          umode_t mode)
{
        /* Do not accept '\n' to avoid creating an unparsable name. */
        if (strchr(name, '\n'))
                return -EINVAL;

        /*
         * If the parent directory is the root directory and RDT
         * allocation is supported, add a control and monitoring
         * subdirectory.
         */
        if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn)
                return rdtgroup_mkdir_ctrl_mon(parent_kn, parent_kn, name, mode);

        /*
         * If RDT monitoring is supported and the parent directory is a valid
         * "mon_groups" directory, add a monitoring subdirectory.
         */
        if (rdt_mon_capable && is_mon_groups(parent_kn, name))
                return rdtgroup_mkdir_mon(parent_kn, parent_kn->parent, name, mode);

        return -EPERM;
}
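
/*
 * Userspace view (illustrative, not from the original source): resource
 * groups are created with mkdir(2) on a mounted resctrl filesystem:
 *
 *	mkdir /sys/fs/resctrl/grp1               # ctrl_mon group
 *	mkdir /sys/fs/resctrl/grp1/mon_groups/m1 # mon group under grp1
 *
 * mkdir anywhere else, or with a name containing '\n', fails.
 */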
static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
                              cpumask_var_t tmpmask)
{
        struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
        int cpu;

        /* Give any tasks back to the parent group */
        rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);

        /* Update per cpu rmid of the moved CPUs first */
        for_each_cpu(cpu, &rdtgrp->cpu_mask)
                per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;
        /*
         * Update the MSR on moved CPUs and CPUs which have moved
         * task running on them.
         */
        cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
        update_closid_rmid(tmpmask, NULL);

        rdtgrp->flags = RDT_DELETED;
        free_rmid(rdtgrp->mon.rmid);

        /*
         * Remove the rdtgrp from the parent ctrl_mon group's list
         */
        WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
        list_del(&rdtgrp->mon.crdtgrp_list);

        /*
         * one extra hold on this, will drop when we kfree(rdtgrp)
         * in rdtgroup_kn_unlock()
         */
        kernfs_get(kn);
        kernfs_remove(rdtgrp->kn);

        return 0;
}
static int rdtgroup_ctrl_remove(struct kernfs_node *kn,
                                struct rdtgroup *rdtgrp)
{
        rdtgrp->flags = RDT_DELETED;
        list_del(&rdtgrp->rdtgroup_list);

        /*
         * one extra hold on this, will drop when we kfree(rdtgrp)
         * in rdtgroup_kn_unlock()
         */
        kernfs_get(kn);
        kernfs_remove(rdtgrp->kn);

        return 0;
}
static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
                               cpumask_var_t tmpmask)
{
        int cpu;

        /* Give any tasks back to the default group */
        rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);

        /* Give any CPUs back to the default group */
        cpumask_or(&rdtgroup_default.cpu_mask,
                   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

        /* Update per cpu closid and rmid of the moved CPUs first */
        for_each_cpu(cpu, &rdtgrp->cpu_mask) {
                per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
                per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
        }

        /*
         * Update the MSR on moved CPUs and CPUs which have moved
         * task running on them.
         */
        cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
        update_closid_rmid(tmpmask, NULL);

        closid_free(rdtgrp->closid);
        free_rmid(rdtgrp->mon.rmid);

        /*
         * Free all the child monitor group rmids.
         */
        free_all_child_rdtgrp(rdtgrp);

        rdtgroup_ctrl_remove(kn, rdtgrp);

        return 0;
}
static int rdtgroup_rmdir(struct kernfs_node *kn)
{
        struct kernfs_node *parent_kn = kn->parent;
        struct rdtgroup *rdtgrp;
        cpumask_var_t tmpmask;
        int ret = 0;

        if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
                return -ENOMEM;

        rdtgrp = rdtgroup_kn_lock_live(kn);
        if (!rdtgrp) {
                ret = -EPERM;
                goto out;
        }

        /*
         * If the rdtgroup is a ctrl_mon group and parent directory
         * is the root directory, remove the ctrl_mon group.
         *
         * If the rdtgroup is a mon group and parent directory
         * is a valid "mon_groups" directory, remove the mon group.
         */
        if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn) {
                if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
                    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
                        ret = rdtgroup_ctrl_remove(kn, rdtgrp);
                } else {
                        ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
                }
        } else if (rdtgrp->type == RDTMON_GROUP &&
                   is_mon_groups(parent_kn, kn->name)) {
                ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask);
        } else {
                ret = -EPERM;
        }

out:
        rdtgroup_kn_unlock(kn);
        free_cpumask_var(tmpmask);
        return ret;
}
static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
{
        if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
                seq_puts(seq, ",cdp");

        if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
                seq_puts(seq, ",cdpl2");

        if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA]))
                seq_puts(seq, ",mba_MBps");

        return 0;
}
static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
        .mkdir          = rdtgroup_mkdir,
        .rmdir          = rdtgroup_rmdir,
        .show_options   = rdtgroup_show_options,
};
static int __init rdtgroup_setup_root(void)
{
        int ret;

        rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
                                      KERNFS_ROOT_CREATE_DEACTIVATED |
                                      KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
                                      &rdtgroup_default);
        if (IS_ERR(rdt_root))
                return PTR_ERR(rdt_root);

        mutex_lock(&rdtgroup_mutex);

        rdtgroup_default.closid = 0;
        rdtgroup_default.mon.rmid = 0;
        rdtgroup_default.type = RDTCTRL_GROUP;
        INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);

        list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);

        ret = rdtgroup_add_files(rdt_root->kn, RF_CTRL_BASE);
        if (ret) {
                kernfs_destroy_root(rdt_root);
                goto out;
        }

        rdtgroup_default.kn = rdt_root->kn;
        kernfs_activate(rdtgroup_default.kn);

out:
        mutex_unlock(&rdtgroup_mutex);

        return ret;
}
/*
 * rdtgroup_init - rdtgroup initialization
 *
 * Set up the resctrl filesystem: set up the root, create the mount point,
 * register the rdtgroup filesystem, and initialize the files under the
 * root directory.
 *
 * Return: 0 on success or -errno
 */
int __init rdtgroup_init(void)
{
        int ret = 0;

        seq_buf_init(&last_cmd_status, last_cmd_status_buf,
                     sizeof(last_cmd_status_buf));

        ret = rdtgroup_setup_root();
        if (ret)
                return ret;

        ret = sysfs_create_mount_point(fs_kobj, "resctrl");
        if (ret)
                goto cleanup_root;

        ret = register_filesystem(&rdt_fs_type);
        if (ret)
                goto cleanup_mountpoint;

        /*
         * Adding the resctrl debugfs directory here may not be ideal since
         * it would let the resctrl debugfs directory appear on the debugfs
         * filesystem before the resctrl filesystem is mounted.
         * It may also be ok since that would enable debugging of RDT before
         * resctrl is mounted.
         * The reason why the debugfs directory is created here and not in
         * rdt_mount() is because rdt_mount() takes rdtgroup_mutex and
         * during the debugfs directory creation also &sb->s_type->i_mutex_key
         * (the lockdep class of inode->i_rwsem). Other filesystem
         * interactions (eg. SyS_getdents) have the lock ordering:
         * &sb->s_type->i_mutex_key --> &mm->mmap_sem
         * During mmap(), called with &mm->mmap_sem, the rdtgroup_mutex
         * is taken, thus creating the dependency
         * &mm->mmap_sem --> rdtgroup_mutex, which can cause deadlock when
         * combined with the other two lock dependencies.
         * By creating the debugfs directory here we avoid a dependency
         * that may cause deadlock (even though file operations cannot
         * occur until the filesystem is mounted, but I do not know how to
         * tell lockdep that).
         */
        debugfs_resctrl = debugfs_create_dir("resctrl", NULL);

        return 0;

cleanup_mountpoint:
        sysfs_remove_mount_point(fs_kobj, "resctrl");
cleanup_root:
        kernfs_destroy_root(rdt_root);

        return ret;
}
void __exit rdtgroup_exit(void)
{
        debugfs_remove_recursive(debugfs_resctrl);
        unregister_filesystem(&rdt_fs_type);
        sysfs_remove_mount_point(fs_kobj, "resctrl");
        kernfs_destroy_root(rdt_root);
}