/*
 * User interface for Resource Allocation in Resource Director Technology (RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/fs_parser.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/task_work.h>
#include <linux/user_namespace.h>

#include <uapi/linux/magic.h>

#include <asm/resctrl_sched.h>
#include "internal.h"
DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
static struct kernfs_root *rdt_root;
struct rdtgroup rdtgroup_default;
LIST_HEAD(rdt_all_groups);

/* Kernel fs node for "info" directory under root */
static struct kernfs_node *kn_info;

/* Kernel fs node for "mon_groups" directory under root */
static struct kernfs_node *kn_mongrp;

/* Kernel fs node for "mon_data" directory under root */
static struct kernfs_node *kn_mondata;

static struct seq_buf last_cmd_status;
static char last_cmd_status_buf[512];

struct dentry *debugfs_resctrl;
void rdt_last_cmd_clear(void)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_clear(&last_cmd_status);
}

void rdt_last_cmd_puts(const char *s)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_puts(&last_cmd_status, s);
}

void rdt_last_cmd_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_vprintf(&last_cmd_status, fmt, ap);
	va_end(ap);
}
/*
 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
 * we can keep a bitmap of free CLOSIDs in a single integer.
 *
 * Using a global CLOSID across all resources has some advantages and
 * some drawbacks:
 * + We can simply set "current->closid" to assign a task to a resource
 *   group.
 * + Context switch code can avoid extra memory references deciding which
 *   CLOSID to load into the PQR_ASSOC MSR
 * - We give up some options in configuring resource groups across multi-socket
 *   systems.
 * - Our choices on how to configure each resource become progressively more
 *   limited as the number of resources grows.
 */
static int closid_free_map;
static int closid_free_map_len;

int closids_supported(void)
{
	return closid_free_map_len;
}
static void closid_init(void)
{
	struct rdt_resource *r;
	int rdt_min_closid = 32;

	/* Compute rdt_min_closid across all resources */
	for_each_alloc_enabled_rdt_resource(r)
		rdt_min_closid = min(rdt_min_closid, r->num_closid);

	closid_free_map = BIT_MASK(rdt_min_closid) - 1;

	/* CLOSID 0 is always reserved for the default group */
	closid_free_map &= ~1;
	closid_free_map_len = rdt_min_closid;
}

static int closid_alloc(void)
{
	u32 closid = ffs(closid_free_map);

	if (closid == 0)
		return -ENOSPC;
	closid--;
	closid_free_map &= ~(1 << closid);

	return closid;
}

void closid_free(int closid)
{
	closid_free_map |= 1 << closid;
}
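
/*
 * Example (illustrative, assuming 4 supported CLOSIDs): closid_init() leaves
 * closid_free_map = 0b1110 because CLOSID 0 is reserved for the default
 * group. A first closid_alloc() has ffs() locate the lowest set bit (bit 1),
 * clears it and returns CLOSID 1, leaving 0b1100; closid_free(1) sets the
 * bit again, restoring 0b1110.
 */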
/**
 * closid_allocated - test if provided closid is in use
 * @closid: closid to be tested
 *
 * Return: true if @closid is currently associated with a resource group,
 *	   false if @closid is free
 */
static bool closid_allocated(unsigned int closid)
{
	return (closid_free_map & (1 << closid)) == 0;
}
/**
 * rdtgroup_mode_by_closid - Return mode of resource group with closid
 * @closid: closid of the resource group
 *
 * Each resource group is associated with a @closid. Here the mode
 * of a resource group can be queried by searching for it using its closid.
 *
 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
 */
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
{
	struct rdtgroup *rdtgrp;

	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (rdtgrp->closid == closid)
			return rdtgrp->mode;
	}

	return RDT_NUM_MODES;
}
static const char * const rdt_mode_str[] = {
	[RDT_MODE_SHAREABLE]		= "shareable",
	[RDT_MODE_EXCLUSIVE]		= "exclusive",
	[RDT_MODE_PSEUDO_LOCKSETUP]	= "pseudo-locksetup",
	[RDT_MODE_PSEUDO_LOCKED]	= "pseudo-locked",
};
/**
 * rdtgroup_mode_str - Return the string representation of mode
 * @mode: the resource group mode as &enum rdtgroup_mode
 *
 * Return: string representation of valid mode, "unknown" otherwise
 */
static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
{
	if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
		return "unknown";

	return rdt_mode_str[mode];
}
/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
				.ia_uid = current_fsuid(),
				.ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}
static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
{
	struct kernfs_node *kn;
	int ret;

	kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				  0, rft->kf_ops, rft, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return 0;
}
static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	struct rftype *rft = of->kn->priv;

	if (rft->seq_show)
		return rft->seq_show(of, m, arg);
	return 0;
}
static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct rftype *rft = of->kn->priv;

	if (rft->write)
		return rft->write(of, buf, nbytes, off);

	return -EINVAL;
}
static struct kernfs_ops rdtgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= rdtgroup_file_write,
	.seq_show		= rdtgroup_seqfile_show,
};

static struct kernfs_ops kf_mondata_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.seq_show		= rdtgroup_mondata_show,
};
static bool is_cpu_list(struct kernfs_open_file *of)
{
	struct rftype *rft = of->kn->priv;

	return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
}
static int rdtgroup_cpus_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct cpumask *mask;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				mask = &rdtgrp->plr->d->cpu_mask;
				seq_printf(s, is_cpu_list(of) ?
					   "%*pbl\n" : "%*pb\n",
					   cpumask_pr_args(mask));
			}
		} else {
			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
				   cpumask_pr_args(&rdtgrp->cpu_mask));
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);

	return ret;
}
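
/*
 * Illustrative shell usage (hypothetical group owning CPUs 0-3, resctrl
 * mounted at /sys/fs/resctrl):
 *   # cat /sys/fs/resctrl/<group>/cpus_list   -> 0-3
 *   # cat /sys/fs/resctrl/<group>/cpus        -> bitmask form, e.g. f
 */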
/*
 * This is safe against resctrl_sched_in() called from __switch_to()
 * because __switch_to() is executed with interrupts disabled. A local call
 * from update_closid_rmid() is protected against __switch_to() because
 * preemption is disabled.
 */
static void update_cpu_closid_rmid(void *info)
{
	struct rdtgroup *r = info;

	if (r) {
		this_cpu_write(pqr_state.default_closid, r->closid);
		this_cpu_write(pqr_state.default_rmid, r->mon.rmid);
	}

	/*
	 * We cannot unconditionally write the MSR because the current
	 * executing task might have its own closid selected. Just reuse
	 * the context switch code.
	 */
	resctrl_sched_in();
}

/*
 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask,
 *
 * Per task closids/rmids must have been set up before calling this function.
 */
static void
update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{
	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, cpu_mask))
		update_cpu_closid_rmid(r);
	smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1);
	put_cpu();
}
static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			  cpumask_var_t tmpmask)
{
	struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
	struct list_head *head;

	/* Check whether cpus belong to parent ctrl group */
	cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
		return -EINVAL;
	}

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (cpumask_weight(tmpmask)) {
		/* Give any dropped cpus to parent rdtgroup */
		cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, prgrp);
	}

	/*
	 * If we added cpus, remove them from previous group that owned them
	 * and update per-cpu rmid
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			if (crgrp == rdtgrp)
				continue;
			cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
				       tmpmask);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	return 0;
}
static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
{
	struct rdtgroup *crgrp;

	cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
	/* update the child mon group masks as well */
	list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
		cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
}
static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			   cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
{
	struct rdtgroup *r, *crgrp;
	struct list_head *head;

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (cpumask_weight(tmpmask)) {
		/* Can't drop from default group */
		if (rdtgrp == &rdtgroup_default) {
			rdt_last_cmd_puts("Can't drop CPUs from default group\n");
			return -EINVAL;
		}

		/* Give any dropped cpus to rdtgroup_default */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, &rdtgroup_default);
	}

	/*
	 * If we added cpus, remove them from previous group and
	 * the prev group's child groups that owned them
	 * and update per-cpu closid/rmid.
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
			if (r == rdtgrp)
				continue;
			cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
			if (cpumask_weight(tmpmask1))
				cpumask_rdtgrp_clear(r, tmpmask1);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	/*
	 * Clear child mon group masks since there is a new parent mask
	 * now and update the rmid for the cpus the child lost.
	 */
	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
		cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
		update_closid_rmid(tmpmask, rdtgrp);
		cpumask_clear(&crgrp->cpu_mask);
	}

	return 0;
}
static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	cpumask_var_t tmpmask, newmask, tmpmask1;
	struct rdtgroup *rdtgrp;
	int ret;

	if (!buf)
		return -EINVAL;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		return -ENOMEM;
	}
	if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		free_cpumask_var(newmask);
		return -ENOMEM;
	}

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	rdt_last_cmd_clear();
	if (!rdtgrp) {
		ret = -ENOENT;
		rdt_last_cmd_puts("Directory was removed\n");
		goto unlock;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	if (is_cpu_list(of))
		ret = cpulist_parse(buf, newmask);
	else
		ret = cpumask_parse(buf, newmask);
	if (ret) {
		rdt_last_cmd_puts("Bad CPU list/mask\n");
		goto unlock;
	}

	/* check that user didn't specify any offline cpus */
	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
	if (cpumask_weight(tmpmask)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Can only assign online CPUs\n");
		goto unlock;
	}

	if (rdtgrp->type == RDTCTRL_GROUP)
		ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
	else if (rdtgrp->type == RDTMON_GROUP)
		ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
	else
		ret = -EINVAL;

unlock:
	rdtgroup_kn_unlock(of->kn);
	free_cpumask_var(tmpmask);
	free_cpumask_var(newmask);
	free_cpumask_var(tmpmask1);

	return ret ?: nbytes;
}
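
/*
 * Illustrative shell usage: writing a CPU list moves those CPUs into the
 * group, e.g.
 *   # echo 0-3 > /sys/fs/resctrl/<group>/cpus_list
 * CPUs dropped from a control group fall back to the default group; CPUs
 * added are pulled from whichever group previously owned them.
 */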
struct task_move_callback {
	struct callback_head	work;
	struct rdtgroup		*rdtgrp;
};

static void move_myself(struct callback_head *head)
{
	struct task_move_callback *callback;
	struct rdtgroup *rdtgrp;

	callback = container_of(head, struct task_move_callback, work);
	rdtgrp = callback->rdtgrp;

	/*
	 * If resource group was deleted before this task work callback
	 * was invoked, then assign the task to root group and free the
	 * resource group.
	 */
	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		current->closid = 0;
		current->rmid = 0;
		kfree(rdtgrp);
	}

	preempt_disable();
	/* update PQR_ASSOC MSR to make resource group go into effect */
	resctrl_sched_in();
	preempt_enable();

	kfree(callback);
}
static int __rdtgroup_move_task(struct task_struct *tsk,
				struct rdtgroup *rdtgrp)
{
	struct task_move_callback *callback;
	int ret;

	callback = kzalloc(sizeof(*callback), GFP_KERNEL);
	if (!callback)
		return -ENOMEM;
	callback->work.func = move_myself;
	callback->rdtgrp = rdtgrp;

	/*
	 * Take a refcount, so rdtgrp cannot be freed before the
	 * callback has been invoked.
	 */
	atomic_inc(&rdtgrp->waitcount);
	ret = task_work_add(tsk, &callback->work, true);
	if (ret) {
		/*
		 * Task is exiting. Drop the refcount and free the callback.
		 * No need to check the refcount as the group cannot be
		 * deleted before the write function unlocks rdtgroup_mutex.
		 */
		atomic_dec(&rdtgrp->waitcount);
		kfree(callback);
		rdt_last_cmd_puts("Task exited\n");
	} else {
		/*
		 * For ctrl_mon groups move both closid and rmid.
		 * For monitor groups, can move the tasks only from
		 * their parent CTRL group.
		 */
		if (rdtgrp->type == RDTCTRL_GROUP) {
			tsk->closid = rdtgrp->closid;
			tsk->rmid = rdtgrp->mon.rmid;
		} else if (rdtgrp->type == RDTMON_GROUP) {
			if (rdtgrp->mon.parent->closid == tsk->closid) {
				tsk->rmid = rdtgrp->mon.rmid;
			} else {
				rdt_last_cmd_puts("Can't move task to different control group\n");
				ret = -EINVAL;
			}
		}
	}
	return ret;
}
/**
 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
 * @r: Resource group
 *
 * Return: 1 if tasks have been assigned to @r, 0 otherwise
 */
int rdtgroup_tasks_assigned(struct rdtgroup *r)
{
	struct task_struct *p, *t;
	int ret = 0;

	lockdep_assert_held(&rdtgroup_mutex);

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
		    (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}
static int rdtgroup_task_write_permission(struct task_struct *task,
					  struct kernfs_open_file *of)
{
	const struct cred *tcred = get_task_cred(task);
	const struct cred *cred = current_cred();
	int ret = 0;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid)) {
		rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
		ret = -EPERM;
	}

	put_cred(tcred);
	return ret;
}
static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
			      struct kernfs_open_file *of)
{
	struct task_struct *tsk;
	int ret;

	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			rdt_last_cmd_printf("No task %d\n", pid);
			return -ESRCH;
		}
	} else {
		tsk = current;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = rdtgroup_task_write_permission(tsk, of);
	if (!ret)
		ret = __rdtgroup_move_task(tsk, rdtgrp);

	put_task_struct(tsk);
	return ret;
}
static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;
	pid_t pid;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;
	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	ret = rdtgroup_move_task(pid, rdtgrp, of);

unlock:
	rdtgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}
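
/*
 * Illustrative shell usage: a task is moved by writing its PID to the
 * group's "tasks" file, e.g.
 *   # echo 1234 > /sys/fs/resctrl/<group>/tasks
 */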
static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
{
	struct task_struct *p, *t;

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
		    (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid))
			seq_printf(s, "%d\n", t->pid);
	}
	rcu_read_unlock();
}
static int rdtgroup_tasks_show(struct kernfs_open_file *of,
			       struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		show_rdt_tasks(rdtgrp, s);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}
static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
				    struct seq_file *seq, void *v)
{
	int len;

	mutex_lock(&rdtgroup_mutex);
	len = seq_buf_used(&last_cmd_status);
	if (len)
		seq_printf(seq, "%.*s", len, last_cmd_status_buf);
	else
		seq_puts(seq, "ok\n");
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}
static int rdt_num_closids_show(struct kernfs_open_file *of,
				struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_closid);
	return 0;
}

static int rdt_default_ctrl_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%x\n", r->default_ctrl);
	return 0;
}

static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
	return 0;
}

static int rdt_shareable_bits_show(struct kernfs_open_file *of,
				   struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%x\n", r->cache.shareable_bits);
	return 0;
}
/**
 * rdt_bit_usage_show - Display current usage of resources
 *
 * A domain is a shared resource that can now be allocated differently. Here
 * we display the current regions of the domain as an annotated bitmask.
 * For each domain of this resource its allocation bitmask
 * is annotated as below to indicate the current usage of the corresponding bit:
 *   0 - currently unused
 *   X - currently available for sharing and used by software and hardware
 *   H - currently used by hardware only but available for software use
 *   S - currently used and shareable by software only
 *   E - currently used exclusively by one resource group
 *   P - currently pseudo-locked by one resource group
 */
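/*
 * Example output (illustrative): for an L3 resource with an 8-bit CBM and
 * two cache domains the file could read "0=SSSSEEEE;1=SSSSSSSS", i.e. in
 * domain 0 the low four bits are owned exclusively by one group while the
 * high four bits are shareable.
 */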
static int rdt_bit_usage_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	u32 sw_shareable = 0, hw_shareable = 0;
	u32 exclusive = 0, pseudo_locked = 0;
	struct rdt_domain *dom;
	int i, hwb, swb, excl, psl;
	enum rdtgrp_mode mode;
	bool sep = false;
	u32 *ctrl;

	mutex_lock(&rdtgroup_mutex);
	hw_shareable = r->cache.shareable_bits;
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_putc(seq, ';');
		ctrl = dom->ctrl_val;
		sw_shareable = 0;
		exclusive = 0;
		seq_printf(seq, "%d=", dom->id);
		for (i = 0; i < closids_supported(); i++, ctrl++) {
			if (!closid_allocated(i))
				continue;
			mode = rdtgroup_mode_by_closid(i);
			switch (mode) {
			case RDT_MODE_SHAREABLE:
				sw_shareable |= *ctrl;
				break;
			case RDT_MODE_EXCLUSIVE:
				exclusive |= *ctrl;
				break;
			case RDT_MODE_PSEUDO_LOCKSETUP:
				/*
				 * RDT_MODE_PSEUDO_LOCKSETUP is possible
				 * here but not included since the CBM
				 * associated with this CLOSID in this mode
				 * is not initialized and no task or cpu can be
				 * assigned this CLOSID.
				 */
				break;
			case RDT_MODE_PSEUDO_LOCKED:
			case RDT_NUM_MODES:
				WARN(1,
				     "invalid mode for closid %d\n", i);
				break;
			}
		}
		for (i = r->cache.cbm_len - 1; i >= 0; i--) {
			pseudo_locked = dom->plr ? dom->plr->cbm : 0;
			hwb = test_bit(i, (unsigned long *)&hw_shareable);
			swb = test_bit(i, (unsigned long *)&sw_shareable);
			excl = test_bit(i, (unsigned long *)&exclusive);
			psl = test_bit(i, (unsigned long *)&pseudo_locked);
			if (hwb && swb)
				seq_putc(seq, 'X');
			else if (hwb && !swb)
				seq_putc(seq, 'H');
			else if (!hwb && swb)
				seq_putc(seq, 'S');
			else if (excl)
				seq_putc(seq, 'E');
			else if (psl)
				seq_putc(seq, 'P');
			else /* Unused bits remain */
				seq_putc(seq, '0');
		}
		sep = true;
	}
	seq_putc(seq, '\n');
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}
static int rdt_min_bw_show(struct kernfs_open_file *of,
			   struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.min_bw);
	return 0;
}

static int rdt_num_rmids_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_rmid);

	return 0;
}

static int rdt_mon_features_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct mon_evt *mevt;

	list_for_each_entry(mevt, &r->evt_list, list)
		seq_printf(seq, "%s\n", mevt->name);

	return 0;
}
static int rdt_bw_gran_show(struct kernfs_open_file *of,
			    struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.bw_gran);
	return 0;
}

static int rdt_delay_linear_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.delay_linear);
	return 0;
}
static int max_threshold_occ_show(struct kernfs_open_file *of,
				  struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale);

	return 0;
}

static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
				       char *buf, size_t nbytes, loff_t off)
{
	struct rdt_resource *r = of->kn->parent->priv;
	unsigned int bytes;
	int ret;

	ret = kstrtouint(buf, 0, &bytes);
	if (ret)
		return ret;

	if (bytes > (boot_cpu_data.x86_cache_size * 1024))
		return -EINVAL;

	resctrl_cqm_threshold = bytes / r->mon_scale;

	return nbytes;
}
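
/*
 * Illustrative arithmetic (assuming mon_scale = 64 purely for the example):
 * writing "65536" stores resctrl_cqm_threshold = 65536 / 64 = 1024 counter
 * units, and the show path above multiplies by mon_scale to report bytes.
 */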
/**
 * rdtgroup_mode_show - Display mode of this resource group
 */
static int rdtgroup_mode_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));

	rdtgroup_kn_unlock(of->kn);
	return 0;
}
/**
 * rdt_cdp_peer_get - Retrieve CDP peer if it exists
 * @r: RDT resource to which RDT domain @d belongs
 * @d: Cache instance for which a CDP peer is requested
 * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer)
 *         Used to return the result.
 * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer)
 *         Used to return the result.
 *
 * RDT resources are managed independently and by extension the RDT domains
 * (RDT resource instances) are managed independently also. The Code and
 * Data Prioritization (CDP) RDT resources, while managed independently,
 * could refer to the same underlying hardware. For example,
 * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache.
 *
 * When provided with an RDT resource @r and an instance of that RDT
 * resource @d rdt_cdp_peer_get() will return if there is a peer RDT
 * resource and the exact instance that shares the same hardware.
 *
 * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists.
 *         If a CDP peer was found, @r_cdp will point to the peer RDT resource
 *         and @d_cdp will point to the peer RDT domain.
 */
static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
			    struct rdt_resource **r_cdp,
			    struct rdt_domain **d_cdp)
{
	struct rdt_resource *_r_cdp = NULL;
	struct rdt_domain *_d_cdp = NULL;
	int ret = 0;

	switch (r->rid) {
	case RDT_RESOURCE_L3DATA:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE];
		break;
	case RDT_RESOURCE_L3CODE:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L3DATA];
		break;
	case RDT_RESOURCE_L2DATA:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L2CODE];
		break;
	case RDT_RESOURCE_L2CODE:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L2DATA];
		break;
	default:
		ret = -ENOENT;
		goto out;
	}

	/*
	 * When a new CPU comes online and CDP is enabled then the new
	 * RDT domains (if any) associated with both CDP RDT resources
	 * are added in the same CPU online routine while the
	 * rdtgroup_mutex is held. It should thus not happen for one
	 * RDT domain to exist and be associated with its RDT CDP
	 * resource but there is no RDT domain associated with the
	 * peer RDT CDP resource. Hence the WARN.
	 */
	_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
	if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
		_r_cdp = NULL;
		ret = -EINVAL;
	}

out:
	*r_cdp = _r_cdp;
	*d_cdp = _d_cdp;

	return ret;
}
/**
 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
 * @r: Resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Checks if provided @cbm intended to be used for @closid on domain
 * @d overlaps with any other closids or other hardware usage associated
 * with this domain. If @exclusive is true then only overlaps with
 * resource groups in exclusive mode will be considered. If @exclusive
 * is false then overlaps with any resource group or hardware entities
 * will be considered.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 *
 * Return: false if CBM does not overlap, true if it does.
 */
static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
				    unsigned long cbm, int closid, bool exclusive)
{
	enum rdtgrp_mode mode;
	unsigned long ctrl_b;
	u32 *ctrl;
	int i;

	/* Check for any overlap with regions used by hardware directly */
	if (!exclusive) {
		ctrl_b = r->cache.shareable_bits;
		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
			return true;
	}

	/* Check for overlap with other resource groups */
	ctrl = d->ctrl_val;
	for (i = 0; i < closids_supported(); i++, ctrl++) {
		ctrl_b = *ctrl;
		mode = rdtgroup_mode_by_closid(i);
		if (closid_allocated(i) && i != closid &&
		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
				if (exclusive) {
					if (mode == RDT_MODE_EXCLUSIVE)
						return true;
					continue;
				}
				return true;
			}
		}
	}

	return false;
}
/**
 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
 * @r: Resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Resources that can be allocated using a CBM can use the CBM to control
 * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
 * for overlap. Overlap test is not limited to the specific resource for
 * which the CBM is intended though - when dealing with CDP resources that
 * share the underlying hardware the overlap check should be performed on
 * the CDP resource sharing the hardware also.
 *
 * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
 * overlap test.
 *
 * Return: true if CBM overlap detected, false if there is no overlap
 */
bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
			   unsigned long cbm, int closid, bool exclusive)
{
	struct rdt_resource *r_cdp;
	struct rdt_domain *d_cdp;

	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive))
		return true;

	if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0)
		return false;

	return __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive);
}
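
/*
 * Illustrative overlap check: with cbm_len = 8, a candidate CBM of 0x0f
 * intersects an existing exclusive allocation of 0x3c in bits 2 and 3, so
 * the test above returns true and the candidate schemata is rejected.
 */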
/**
 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
 *
 * An exclusive resource group implies that there should be no sharing of
 * its allocated resources. At the time this group is considered to be
 * exclusive this test can determine if its current schemata supports this
 * setting by testing for overlap with all other resource groups.
 *
 * Return: true if resource group can be exclusive, false if there is overlap
 * with allocations of other resource groups and thus this resource group
 * cannot be exclusive.
 */
static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{
	int closid = rdtgrp->closid;
	struct rdt_resource *r;
	bool has_cache = false;
	struct rdt_domain *d;

	for_each_alloc_enabled_rdt_resource(r) {
		if (r->rid == RDT_RESOURCE_MBA)
			continue;
		has_cache = true;
		list_for_each_entry(d, &r->domains, list) {
			if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
						  rdtgrp->closid, false)) {
				rdt_last_cmd_puts("Schemata overlaps\n");
				return false;
			}
		}
	}

	if (!has_cache) {
		rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
		return false;
	}

	return true;
}
/**
 * rdtgroup_mode_write - Modify the resource group's mode
 *
 */
static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	enum rdtgrp_mode mode;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	rdt_last_cmd_clear();

	mode = rdtgrp->mode;

	if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
	    (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
	    (!strcmp(buf, "pseudo-locksetup") &&
	     mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
	    (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
		goto out;

	if (mode == RDT_MODE_PSEUDO_LOCKED) {
		rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(buf, "shareable")) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_SHAREABLE;
	} else if (!strcmp(buf, "exclusive")) {
		if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
			ret = -EINVAL;
			goto out;
		}
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_EXCLUSIVE;
	} else if (!strcmp(buf, "pseudo-locksetup")) {
		ret = rdtgroup_locksetup_enter(rdtgrp);
		if (ret)
			goto out;
		rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
	} else {
		rdt_last_cmd_puts("Unknown or unsupported mode\n");
		ret = -EINVAL;
	}

out:
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}
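
/*
 * Illustrative shell usage:
 *   # echo exclusive > /sys/fs/resctrl/<group>/mode
 * succeeds only if rdtgroup_mode_test_exclusive() found no overlap with
 * the schemata of other resource groups.
 */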
/**
 * rdtgroup_cbm_to_size - Translate CBM to size in bytes
 * @r: RDT resource to which @d belongs.
 * @d: RDT domain instance.
 * @cbm: bitmask for which the size should be computed.
 *
 * The bitmask provided associated with the RDT domain instance @d will be
 * translated into how many bytes it represents. The size in bytes is
 * computed by first dividing the total cache size by the CBM length to
 * determine how many bytes each bit in the bitmask represents. The result
 * is multiplied with the number of bits set in the bitmask.
 *
 * @cbm is unsigned long, even if only 32 bits are used to make the
 * bitmap functions work correctly.
 */
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
				  struct rdt_domain *d, unsigned long cbm)
{
	struct cpu_cacheinfo *ci;
	unsigned int size = 0;
	int num_b, i;

	num_b = bitmap_weight(&cbm, r->cache.cbm_len);
	ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == r->cache_level) {
			size = ci->info_list[i].size / r->cache.cbm_len * num_b;
			break;
		}
	}

	return size;
}
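
/*
 * Worked example (illustrative numbers): a 2 MiB cache with cbm_len = 16
 * gives 2097152 / 16 = 131072 bytes per CBM bit, so a CBM of 0xf (4 bits
 * set) translates to 4 * 131072 = 524288 bytes.
 */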
/**
 * rdtgroup_size_show - Display size in bytes of allocated regions
 *
 * The "size" file mirrors the layout of the "schemata" file, printing the
 * size in bytes of each region instead of the capacity bitmask.
 *
 */
static int rdtgroup_size_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	struct rdt_domain *d;
	unsigned int size;
	int ret = 0;
	bool sep;
	u32 ctrl;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		if (!rdtgrp->plr->d) {
			rdt_last_cmd_clear();
			rdt_last_cmd_puts("Cache domain offline\n");
			ret = -ENODEV;
		} else {
			seq_printf(s, "%*s:", max_name_width,
				   rdtgrp->plr->r->name);
			size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
						    rdtgrp->plr->d,
						    rdtgrp->plr->cbm);
			seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
		}
		goto out;
	}

	for_each_alloc_enabled_rdt_resource(r) {
		sep = false;
		seq_printf(s, "%*s:", max_name_width, r->name);
		list_for_each_entry(d, &r->domains, list) {
			if (sep)
				seq_putc(s, ';');
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				size = 0;
			} else {
				ctrl = (!is_mba_sc(r) ?
						d->ctrl_val[rdtgrp->closid] :
						d->mbps_val[rdtgrp->closid]);
				if (r->rid == RDT_RESOURCE_MBA)
					size = ctrl;
				else
					size = rdtgroup_cbm_to_size(r, d, ctrl);
			}
			seq_printf(s, "%d=%u", d->id, size);
			sep = true;
		}
		seq_putc(s, '\n');
	}

out:
	rdtgroup_kn_unlock(of->kn);

	return ret;
}
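
/*
 * Example "size" output (illustrative): a two-domain L3 resource might show
 *   L3:0=524288;1=524288
 * mirroring the "schemata" layout with byte counts instead of bitmasks.
 */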
/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
	{
		.name		= "last_cmd_status",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_last_cmd_status_show,
		.fflags		= RF_TOP_INFO,
	},
	{
		.name		= "num_closids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_closids_show,
		.fflags		= RF_CTRL_INFO,
	},
	{
		.name		= "mon_features",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_mon_features_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "num_rmids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_rmids_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "cbm_mask",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_default_ctrl_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_cbm_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_cbm_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "shareable_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_shareable_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "bit_usage",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bit_usage_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_bandwidth",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_bw_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "bandwidth_gran",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bw_gran_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "delay_linear",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_delay_linear_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "max_threshold_occupancy",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= max_threshold_occ_write,
		.seq_show	= max_threshold_occ_show,
		.fflags		= RF_MON_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "cpus",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "cpus_list",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.flags		= RFTYPE_FLAGS_CPUS_LIST,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "tasks",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_tasks_write,
		.seq_show	= rdtgroup_tasks_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "schemata",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_schemata_write,
		.seq_show	= rdtgroup_schemata_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "mode",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_mode_write,
		.seq_show	= rdtgroup_mode_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "size",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdtgroup_size_show,
		.fflags		= RF_CTRL_BASE,
	},
};
static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
{
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	lockdep_assert_held(&rdtgroup_mutex);

	for (rft = rfts; rft < rfts + len; rft++) {
		if ((fflags & rft->fflags) == rft->fflags) {
			ret = rdtgroup_add_file(kn, rft);
			if (ret)
				goto error;
		}
	}

	return 0;
error:
	pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
	while (--rft >= rfts) {
		if ((fflags & rft->fflags) == rft->fflags)
			kernfs_remove_by_name(kn, rft->name);
	}
	return ret;
}
/**
 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 *
 * The permissions of named resctrl file, directory, or link are modified
 * to not allow read, write, or execute by any user.
 *
 * WARNING: This function is intended to communicate to the user that the
 * resctrl file has been locked down - that it is not relevant to the
 * particular state the system finds itself in. It should not be relied
 * on to protect from user access because after the file's permissions
 * are restricted the user can still change the permissions using chmod
 * from the command line.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn;
	int ret = 0;

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		iattr.ia_mode = S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode = S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode = S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}
/**
 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 * @mask: Mask of permissions that should be restored
 *
 * Restore the permissions of the named file. If @name is a directory the
 * permissions of its parent will be used.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
			     umode_t mask)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn, *parent;
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (!strcmp(rft->name, name))
			iattr.ia_mode = rft->mode & mask;
	}

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		parent = kernfs_get_parent(kn);
		if (parent) {
			iattr.ia_mode |= parent->mode;
			kernfs_put(parent);
		}
		iattr.ia_mode |= S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode |= S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode |= S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}
static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
				      unsigned long fflags)
{
	struct kernfs_node *kn_subdir;
	int ret;

	kn_subdir = kernfs_create_dir(kn_info, name,
				      kn_info->mode, r);
	if (IS_ERR(kn_subdir))
		return PTR_ERR(kn_subdir);

	kernfs_get(kn_subdir);
	ret = rdtgroup_kn_set_ugid(kn_subdir);
	if (ret)
		return ret;

	ret = rdtgroup_add_files(kn_subdir, fflags);
	if (!ret)
		kernfs_activate(kn_subdir);

	return ret;
}
*parent_kn
)
1640 struct rdt_resource
*r
;
1641 unsigned long fflags
;
1645 /* create the directory */
1646 kn_info
= kernfs_create_dir(parent_kn
, "info", parent_kn
->mode
, NULL
);
1647 if (IS_ERR(kn_info
))
1648 return PTR_ERR(kn_info
);
1649 kernfs_get(kn_info
);
1651 ret
= rdtgroup_add_files(kn_info
, RF_TOP_INFO
);
1655 for_each_alloc_enabled_rdt_resource(r
) {
1656 fflags
= r
->fflags
| RF_CTRL_INFO
;
1657 ret
= rdtgroup_mkdir_info_resdir(r
, r
->name
, fflags
);
1662 for_each_mon_enabled_rdt_resource(r
) {
1663 fflags
= r
->fflags
| RF_MON_INFO
;
1664 sprintf(name
, "%s_MON", r
->name
);
1665 ret
= rdtgroup_mkdir_info_resdir(r
, name
, fflags
);
1671 * This extra ref will be put in kernfs_remove() and guarantees
1672 * that @rdtgrp->kn is always accessible.
1674 kernfs_get(kn_info
);
1676 ret
= rdtgroup_kn_set_ugid(kn_info
);
1680 kernfs_activate(kn_info
);
1685 kernfs_remove(kn_info
);
static int
mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
		    char *name, struct kernfs_node **dest_kn)
{
	struct kernfs_node *kn;
	int ret;

	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	if (dest_kn)
		*dest_kn = kn;

	/*
	 * This extra ref will be put in kernfs_remove() and guarantees
	 * that @rdtgrp->kn is always accessible.
	 */
	kernfs_get(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn);

	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}
static void l3_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}

static void l2_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
}

static inline bool is_mba_linear(void)
{
	return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear;
}
static int set_cache_qos_cfg(int level, bool enable)
{
	void (*update)(void *arg);
	struct rdt_resource *r_l;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int cpu;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	if (level == RDT_RESOURCE_L3)
		update = l3_qos_cfg_update;
	else if (level == RDT_RESOURCE_L2)
		update = l2_qos_cfg_update;
	else
		return -EINVAL;

	r_l = &rdt_resources_all[level];
	list_for_each_entry(d, &r_l->domains, list) {
		/* Pick one CPU from each domain instance to update MSR */
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
	}
	cpu = get_cpu();
	/* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		update(&enable);
	/* Update QOS_CFG MSR on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, update, &enable, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);

	return 0;
}
/*
 * Enable or disable the MBA software controller
 * which helps user specify bandwidth in MBps.
 * MBA software controller is supported only if
 * MBM is supported and MBA is in linear scale.
 */
static int set_mba_sc(bool mba_sc)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA];
	struct rdt_domain *d;

	if (!is_mbm_enabled() || !is_mba_linear() ||
	    mba_sc == is_mba_sc(r))
		return -EINVAL;

	r->membw.mba_sc = mba_sc;
	list_for_each_entry(d, &r->domains, list)
		setup_default_ctrlval(r, d->ctrl_val, d->mbps_val);

	return 0;
}
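
/*
 * The software controller is requested at mount time, e.g.
 *   # mount -t resctrl -o mba_MBps resctrl /sys/fs/resctrl
 * after which MBA schemata entries are expressed in MBps rather than in
 * the hardware's native delay values.
 */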
static int cdp_enable(int level, int data_type, int code_type)
{
	struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
	struct rdt_resource *r_lcode = &rdt_resources_all[code_type];
	struct rdt_resource *r_l = &rdt_resources_all[level];
	int ret;

	if (!r_l->alloc_capable || !r_ldata->alloc_capable ||
	    !r_lcode->alloc_capable)
		return -EINVAL;

	ret = set_cache_qos_cfg(level, true);
	if (!ret) {
		r_l->alloc_enabled = false;
		r_ldata->alloc_enabled = true;
		r_lcode->alloc_enabled = true;
	}
	return ret;
}

static int cdpl3_enable(void)
{
	return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA,
			  RDT_RESOURCE_L3CODE);
}

static int cdpl2_enable(void)
{
	return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA,
			  RDT_RESOURCE_L2CODE);
}
static void cdp_disable(int level, int data_type, int code_type)
{
	struct rdt_resource *r = &rdt_resources_all[level];

	r->alloc_enabled = r->alloc_capable;

	if (rdt_resources_all[data_type].alloc_enabled) {
		rdt_resources_all[data_type].alloc_enabled = false;
		rdt_resources_all[code_type].alloc_enabled = false;
		set_cache_qos_cfg(level, false);
	}
}

static void cdpl3_disable(void)
{
	cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE);
}

static void cdpl2_disable(void)
{
	cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE);
}

static void cdp_disable_all(void)
{
	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
		cdpl3_disable();
	if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
		cdpl2_disable();
}
/*
 * We don't allow rdtgroup directories to be created anywhere
 * except the root directory. Thus when looking for the rdtgroup
 * structure for a kernfs node we are either looking at a directory,
 * in which case the rdtgroup structure is pointed at by the "priv"
 * field, otherwise we have a file, and need only look to the parent
 * to find the rdtgroup.
 */
static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
{
	if (kernfs_type(kn) == KERNFS_DIR) {
		/*
		 * All the resource directories use "kn->priv"
		 * to point to the "struct rdtgroup" for the
		 * resource. "info" and its subdirectories don't
		 * have rdtgroup structures, so return NULL here.
		 */
		if (kn == kn_info || kn->parent == kn_info)
			return NULL;
		else
			return kn->priv;
	} else {
		return kn->parent->priv;
	}
}
struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return NULL;

	atomic_inc(&rdtgrp->waitcount);
	kernfs_break_active_protection(kn);

	mutex_lock(&rdtgroup_mutex);

	/* Was this group deleted while we waited? */
	if (rdtgrp->flags & RDT_DELETED)
		return NULL;

	return rdtgrp;
}
void rdtgroup_kn_unlock(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return;

	mutex_unlock(&rdtgroup_mutex);

	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);
		kernfs_unbreak_active_protection(kn);
		kernfs_put(rdtgrp->kn);
		kfree(rdtgrp);
	} else {
		kernfs_unbreak_active_protection(kn);
	}
}
static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **mon_data_kn);

static int rdt_enable_ctx(struct rdt_fs_context *ctx)
{
	int ret = 0;

	if (ctx->enable_cdpl2)
		ret = cdpl2_enable();

	if (!ret && ctx->enable_cdpl3)
		ret = cdpl3_enable();

	if (!ret && ctx->enable_mba_mbps)
		ret = set_mba_sc(true);

	return ret;
}
static int rdt_get_tree(struct fs_context *fc)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);
	struct rdt_domain *dom;
	struct rdt_resource *r;
	int ret;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);
	/*
	 * resctrl file system can only be mounted once.
	 */
	if (static_branch_unlikely(&rdt_enable_key)) {
		ret = -EBUSY;
		goto out;
	}

	ret = rdt_enable_ctx(ctx);
	if (ret < 0)
		goto out_cdp;

	ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
	if (ret < 0)
		goto out_cdp;

	if (rdt_mon_capable) {
		ret = mongroup_create_dir(rdtgroup_default.kn,
					  NULL, "mon_groups",
					  &kn_mongrp);
		if (ret < 0)
			goto out_info;
		kernfs_get(kn_mongrp);

		ret = mkdir_mondata_all(rdtgroup_default.kn,
					&rdtgroup_default, &kn_mondata);
		if (ret < 0)
			goto out_mongrp;
		kernfs_get(kn_mondata);
		rdtgroup_default.mon.mon_data_kn = kn_mondata;
	}

	ret = rdt_pseudo_lock_init();
	if (ret)
		goto out_mondata;

	ret = kernfs_get_tree(fc);
	if (ret < 0)
		goto out_psl;

	if (rdt_alloc_capable)
		static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
	if (rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_mon_enable_key);

	if (rdt_alloc_capable || rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_enable_key);

	if (is_mbm_enabled()) {
		r = &rdt_resources_all[RDT_RESOURCE_L3];
		list_for_each_entry(dom, &r->domains, list)
			mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
	}

	goto out;

out_psl:
	rdt_pseudo_lock_release();
out_mondata:
	if (rdt_mon_capable)
		kernfs_remove(kn_mondata);
out_mongrp:
	if (rdt_mon_capable)
		kernfs_remove(kn_mongrp);
out_info:
	kernfs_remove(kn_info);
out_cdp:
	if (ctx->enable_mba_mbps)
		set_mba_sc(false);
	cdp_disable_all();
out:
	rdt_last_cmd_clear();
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
	return ret;
}
enum rdt_param {
	Opt_cdp,
	Opt_cdpl2,
	Opt_mba_mbps,
	nr__rdt_params
};

static const struct fs_parameter_spec rdt_param_specs[] = {
	fsparam_flag("cdp",		Opt_cdp),
	fsparam_flag("cdpl2",		Opt_cdpl2),
	fsparam_flag("mba_MBps",	Opt_mba_mbps),
	{}
};

static const struct fs_parameter_description rdt_fs_parameters = {
	.name		= "rdt",
	.specs		= rdt_param_specs,
};
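
/*
 * Illustrative mount invocations for these parameters:
 *   # mount -t resctrl resctrl /sys/fs/resctrl
 *   # mount -t resctrl -o cdp resctrl /sys/fs/resctrl
 *   # mount -t resctrl -o cdpl2,mba_MBps resctrl /sys/fs/resctrl
 */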
static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, &rdt_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_cdp:
		ctx->enable_cdpl3 = true;
		return 0;
	case Opt_cdpl2:
		ctx->enable_cdpl2 = true;
		return 0;
	case Opt_mba_mbps:
		if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
			return -EINVAL;
		ctx->enable_mba_mbps = true;
		return 0;
	}

	return -EINVAL;
}
static void rdt_fs_context_free(struct fs_context *fc)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);

	kernfs_free_fs_context(fc);
	kfree(ctx);
}

static const struct fs_context_operations rdt_fs_context_ops = {
	.free		= rdt_fs_context_free,
	.parse_param	= rdt_parse_param,
	.get_tree	= rdt_get_tree,
};
static int rdt_init_fs_context(struct fs_context *fc)
{
	struct rdt_fs_context *ctx;

	ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->kfc.root = rdt_root;
	ctx->kfc.magic = RDTGROUP_SUPER_MAGIC;
	fc->fs_private = &ctx->kfc;
	fc->ops = &rdt_fs_context_ops;
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(&init_user_ns);
	fc->global = true;
	return 0;
}
static int reset_all_ctrls(struct rdt_resource *r)
{
	struct msr_param msr_param;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int i, cpu;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	msr_param.res = r;
	msr_param.low = 0;
	msr_param.high = r->num_closid;

	/*
	 * Disable resource control for this resource by setting all
	 * CBMs in all domains to the maximum mask value. Pick one CPU
	 * from each domain to update the MSRs below.
	 */
	list_for_each_entry(d, &r->domains, list) {
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);

		for (i = 0; i < r->num_closid; i++)
			d->ctrl_val[i] = r->default_ctrl;
	}
	cpu = get_cpu();
	/* Update CBM on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		rdt_ctrl_update(&msr_param);
	/* Update CBM on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);

	return 0;
}
static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_alloc_capable &&
		(r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
}

static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_mon_capable &&
		(r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
}
/*
 * Move tasks from one to the other group. If @from is NULL, then all tasks
 * in the systems are moved unconditionally (used for teardown).
 *
 * If @mask is not NULL the cpus on which moved tasks are running are set
 * in that mask so the update smp function call is restricted to affected
 * cpus.
 */
static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
				 struct cpumask *mask)
{
	struct task_struct *p, *t;

	read_lock(&tasklist_lock);
	for_each_process_thread(p, t) {
		if (!from || is_closid_match(t, from) ||
		    is_rmid_match(t, from)) {
			t->closid = to->closid;
			t->rmid = to->mon.rmid;

			/*
			 * This is safe on x86 w/o barriers as the ordering
			 * of writing to task_cpu() and t->on_cpu is
			 * reverse to the reading here. The detection is
			 * inaccurate as tasks might move or schedule
			 * before the smp function call takes place. In
			 * such a case the function call is pointless, but
			 * there is no other side effect.
			 */
			if (mask && t->on_cpu)
				cpumask_set_cpu(task_cpu(t), mask);
		}
	}
	read_unlock(&tasklist_lock);
}
static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
{
	struct rdtgroup *sentry, *stmp;
	struct list_head *head;

	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
		free_rmid(sentry->mon.rmid);
		list_del(&sentry->mon.crdtgrp_list);
		kfree(sentry);
	}
}
/*
 * Forcibly remove all of subdirectories under root.
 */
static void rmdir_all_sub(void)
{
	struct rdtgroup *rdtgrp, *tmp;

	/* Move all tasks to the default resource group */
	rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);

	list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
		/* Free any child rmids */
		free_all_child_rdtgrp(rdtgrp);

		/* Remove each rdtgroup other than root */
		if (rdtgrp == &rdtgroup_default)
			continue;

		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);

		/*
		 * Give any CPUs back to the default group. We cannot copy
		 * cpu_online_mask because a CPU might have executed the
		 * offline callback already, but is still marked online.
		 */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

		free_rmid(rdtgrp->mon.rmid);

		kernfs_remove(rdtgrp->kn);
		list_del(&rdtgrp->rdtgroup_list);
		kfree(rdtgrp);
	}
	/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
	update_closid_rmid(cpu_online_mask, &rdtgroup_default);

	kernfs_remove(kn_info);
	kernfs_remove(kn_mongrp);
	kernfs_remove(kn_mondata);
}
static void rdt_kill_sb(struct super_block *sb)
{
	struct rdt_resource *r;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	set_mba_sc(false);

	/* Put everything back to default values. */
	for_each_alloc_enabled_rdt_resource(r)
		reset_all_ctrls(r);
	cdp_disable_all();
	rmdir_all_sub();
	rdt_pseudo_lock_release();
	rdtgroup_default.mode = RDT_MODE_SHAREABLE;
	static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
	static_branch_disable_cpuslocked(&rdt_mon_enable_key);
	static_branch_disable_cpuslocked(&rdt_enable_key);
	kernfs_kill_sb(sb);
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
}
static struct file_system_type rdt_fs_type = {
	.name			= "resctrl",
	.init_fs_context	= rdt_init_fs_context,
	.parameters		= &rdt_fs_parameters,
	.kill_sb		= rdt_kill_sb,
};
static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
		       void *priv)
{
	struct kernfs_node *kn;
	int ret = 0;

	kn = __kernfs_create_file(parent_kn, name, 0444,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
				  &kf_mondata_ops, priv, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return ret;
}
/*
 * Remove all subdirectories of mon_data of ctrl_mon groups
 * and monitor groups with given domain id.
 */
void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id)
{
	struct rdtgroup *prgrp, *crgrp;
	char name[32];

	if (!r->mon_enabled)
		return;

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		sprintf(name, "mon_%s_%02d", r->name, dom_id);
		kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);

		list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
			kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
	}
}
static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
				struct rdt_domain *d,
				struct rdt_resource *r, struct rdtgroup *prgrp)
{
	union mon_data_bits priv;
	struct kernfs_node *kn;
	struct mon_evt *mevt;
	struct rmid_read rr;
	char name[32];
	int ret;

	sprintf(name, "mon_%s_%02d", r->name, d->id);
	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	/*
	 * This extra ref will be put in kernfs_remove() and guarantees
	 * that kn is always accessible.
	 */
	kernfs_get(kn);
	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	if (WARN_ON(list_empty(&r->evt_list))) {
		ret = -EPERM;
		goto out_destroy;
	}

	priv.u.rid = r->rid;
	priv.u.domid = d->id;
	list_for_each_entry(mevt, &r->evt_list, list) {
		priv.u.evtid = mevt->evtid;
		ret = mon_addfile(kn, mevt->name, priv.priv);
		if (ret)
			goto out_destroy;

		if (is_mbm_event(mevt->evtid))
			mon_event_read(&rr, d, prgrp, mevt->evtid, true);
	}
	kernfs_activate(kn);
	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}
/*
 * Add all subdirectories of mon_data for "ctrl_mon" groups
 * and "monitor" groups with given domain id.
 */
void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
				    struct rdt_domain *d)
{
	struct kernfs_node *parent_kn;
	struct rdtgroup *prgrp, *crgrp;
	struct list_head *head;

	if (!r->mon_enabled)
		return;

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		parent_kn = prgrp->mon.mon_data_kn;
		mkdir_mondata_subdir(parent_kn, d, r, prgrp);

		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			parent_kn = crgrp->mon.mon_data_kn;
			mkdir_mondata_subdir(parent_kn, d, r, crgrp);
		}
	}
}
static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
				       struct rdt_resource *r,
				       struct rdtgroup *prgrp)
{
	struct rdt_domain *dom;
	int ret;

	list_for_each_entry(dom, &r->domains, list) {
		ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * This creates a directory mon_data which contains the monitored data.
 *
 * mon_data has one directory for each domain which are named
 * in the format mon_<domain_name>_<domain_id>. For ex: A mon_data
 * with L3 domain looks as below:
 * ./mon_data:
 * mon_L3_00
 * mon_L3_01
 * mon_L3_02
 * ...
 *
 * Each domain directory has one file per event:
 * ./mon_L3_00/:
 * llc_occupancy
 *
 */
2445 static int mkdir_mondata_all(struct kernfs_node
*parent_kn
,
2446 struct rdtgroup
*prgrp
,
2447 struct kernfs_node
**dest_kn
)
2449 struct rdt_resource
*r
;
2450 struct kernfs_node
*kn
;
2454 * Create the mon_data directory first.
2456 ret
= mongroup_create_dir(parent_kn
, NULL
, "mon_data", &kn
);
2464 * Create the subdirectories for each domain. Note that all events
2465 * in a domain like L3 are grouped into a resource whose domain is L3
2467 for_each_mon_enabled_rdt_resource(r
) {
2468 ret
= mkdir_mondata_subdir_alldom(kn
, r
, prgrp
);
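/*
 * Usage sketch (illustrative, not part of the original file): once the
 * filesystem is mounted, the per-domain event files created above are read
 * directly, e.g.:
 *
 *	# cat /sys/fs/resctrl/mon_data/mon_L3_00/llc_occupancy
 *
 * which reports the current L3 cache occupancy, in bytes, for the group's
 * RMID on L3 domain 0.
 */
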
/**
 * cbm_ensure_valid - Enforce validity on provided CBM
 * @_val:	Candidate CBM
 * @r:		RDT resource to which the CBM belongs
 *
 * The provided CBM represents all cache portions available for use. This
 * may be represented by a bitmap that does not consist of contiguous ones
 * and thus be an invalid CBM.
 * Here the provided CBM is forced to be a valid CBM by only considering
 * the first set of contiguous bits as valid and clearing all bits
 * following this set of contiguous bits.
 * The intention here is to provide a valid default CBM with which a new
 * resource group is initialized. The user can follow this with a
 * modification to the CBM if the default does not satisfy the
 * requirements.
 */
static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
{
	/*
	 * Convert the u32 _val to an unsigned long required by all the bit
	 * operations within this function. No more than 32 bits of this
	 * converted value can be accessed because all bit operations are
	 * additionally provided with cbm_len that is initialized during
	 * hardware enumeration using five bits from the EAX register and
	 * thus never can exceed 32 bits.
	 */
	unsigned long *val = (unsigned long *)_val;
	unsigned int cbm_len = r->cache.cbm_len;
	unsigned long first_bit, zero_bit;

	if (*val == 0)
		return;

	first_bit = find_first_bit(val, cbm_len);
	zero_bit = find_next_zero_bit(val, cbm_len, first_bit);

	/* Clear any remaining bits to ensure contiguous region */
	bitmap_clear(val, zero_bit, cbm_len - zero_bit);
}

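/*
 * Worked example (illustrative values, not from the original source): with
 * cbm_len = 8 and *_val = 0xC3 (0b11000011), find_first_bit() returns 0 and
 * find_next_zero_bit() returns 2, so bitmap_clear(val, 2, 6) clears bits
 * 2..7 and leaves *_val = 0x03: only the first run of contiguous set bits
 * survives.
 */
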
/*
 * Initialize cache resources per RDT domain
 *
 * Set the RDT domain up to start off with all usable allocations. That is,
 * all shareable and unused bits. All-zero CBM is invalid.
 */
static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
				 u32 closid)
{
	struct rdt_resource *r_cdp = NULL;
	struct rdt_domain *d_cdp = NULL;
	u32 used_b = 0, unused_b = 0;
	unsigned long tmp_cbm;
	enum rdtgrp_mode mode;
	u32 peer_ctl, *ctrl;
	int i;

	rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
	d->have_new_ctrl = false;
	d->new_ctrl = r->cache.shareable_bits;
	used_b = r->cache.shareable_bits;
	ctrl = d->ctrl_val;
	for (i = 0; i < closids_supported(); i++, ctrl++) {
		if (closid_allocated(i) && i != closid) {
			mode = rdtgroup_mode_by_closid(i);
			if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
				break;
			/*
			 * If CDP is active include peer domain's
			 * usage to ensure there is no overlap
			 * with an exclusive group.
			 */
			if (d_cdp)
				peer_ctl = d_cdp->ctrl_val[i];
			else
				peer_ctl = 0;
			used_b |= *ctrl | peer_ctl;
			if (mode == RDT_MODE_SHAREABLE)
				d->new_ctrl |= *ctrl | peer_ctl;
		}
	}
	if (d->plr && d->plr->cbm > 0)
		used_b |= d->plr->cbm;
	unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
	unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
	d->new_ctrl |= unused_b;
	/*
	 * Force the initial CBM to be valid, user can
	 * modify the CBM based on system availability.
	 */
	cbm_ensure_valid(&d->new_ctrl, r);
	/*
	 * Assign the u32 CBM to an unsigned long to ensure that
	 * bitmap_weight() does not access out-of-bound memory.
	 */
	tmp_cbm = d->new_ctrl;
	if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id);
		return -ENOSPC;
	}
	d->have_new_ctrl = true;

	return 0;
}

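/*
 * Worked example (illustrative values, not from the original source):
 * assume cbm_len = 8, shareable_bits = 0x03, and one other allocated CLOSID
 * in exclusive mode owning 0x3C. Then used_b becomes 0x3F while new_ctrl
 * stays 0x03 (exclusive bits are not shareable), unused_b = 0x3F ^ 0xFF =
 * 0xC0, and new_ctrl |= unused_b yields 0xC3. That bitmap is not
 * contiguous, so cbm_ensure_valid() trims it to 0x03 before it is checked
 * against min_cbm_bits.
 */
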
/*
 * Initialize cache resources with default values.
 *
 * A new RDT group is being created on an allocation capable (CAT)
 * supporting system. Set this group up to start off with all usable
 * allocations.
 *
 * If there are no more shareable bits available on any domain then
 * the entire allocation will fail.
 */
static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
{
	struct rdt_domain *d;
	int ret;

	list_for_each_entry(d, &r->domains, list) {
		ret = __init_one_rdt_domain(d, r, closid);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Initialize MBA resource with default values. */
static void rdtgroup_init_mba(struct rdt_resource *r)
{
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list) {
		d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
		d->have_new_ctrl = true;
	}
}

/* Initialize the RDT group's allocations. */
static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
	struct rdt_resource *r;
	int ret;

	for_each_alloc_enabled_rdt_resource(r) {
		if (r->rid == RDT_RESOURCE_MBA) {
			rdtgroup_init_mba(r);
		} else {
			ret = rdtgroup_init_cat(r, rdtgrp->closid);
			if (ret < 0)
				return ret;
		}

		ret = update_domains(r, rdtgrp->closid);
		if (ret < 0) {
			rdt_last_cmd_puts("Failed to initialize allocations\n");
			return ret;
		}
	}

	rdtgrp->mode = RDT_MODE_SHAREABLE;

	return 0;
}

static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
			     struct kernfs_node *prgrp_kn,
			     const char *name, umode_t mode,
			     enum rdt_group_type rtype, struct rdtgroup **r)
{
	struct rdtgroup *prdtgrp, *rdtgrp;
	struct kernfs_node *kn;
	uint files = 0;
	int ret;

	prdtgrp = rdtgroup_kn_lock_live(prgrp_kn);
	rdt_last_cmd_clear();
	if (!prdtgrp) {
		ret = -ENODEV;
		rdt_last_cmd_puts("Directory was removed\n");
		goto out_unlock;
	}

	if (rtype == RDTMON_GROUP &&
	    (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
	     prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto out_unlock;
	}

	/* allocate the rdtgroup. */
	rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
	if (!rdtgrp) {
		ret = -ENOSPC;
		rdt_last_cmd_puts("Kernel out of memory\n");
		goto out_unlock;
	}
	*r = rdtgrp;
	rdtgrp->mon.parent = prdtgrp;
	rdtgrp->type = rtype;
	INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list);

	/* kernfs creates the directory for rdtgrp */
	kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
	if (IS_ERR(kn)) {
		ret = PTR_ERR(kn);
		rdt_last_cmd_puts("kernfs create error\n");
		goto out_free_rgrp;
	}
	rdtgrp->kn = kn;

	/*
	 * kernfs_remove() will drop the reference count on "kn" which
	 * will free it. But we still need it to stick around for the
	 * rdtgroup_kn_unlock(kn) call below. Take one extra reference
	 * here, which will be dropped inside rdtgroup_kn_unlock().
	 */
	kernfs_get(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		rdt_last_cmd_puts("kernfs perm error\n");
		goto out_destroy;
	}

	files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype);
	ret = rdtgroup_add_files(kn, files);
	if (ret) {
		rdt_last_cmd_puts("kernfs fill error\n");
		goto out_destroy;
	}

	if (rdt_mon_capable) {
		ret = alloc_rmid();
		if (ret < 0) {
			rdt_last_cmd_puts("Out of RMIDs\n");
			goto out_destroy;
		}
		rdtgrp->mon.rmid = ret;

		ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
		if (ret) {
			rdt_last_cmd_puts("kernfs subdir error\n");
			goto out_idfree;
		}
	}
	kernfs_activate(kn);

	/*
	 * The caller unlocks the prgrp_kn upon success.
	 */
	return 0;

out_idfree:
	free_rmid(rdtgrp->mon.rmid);
out_destroy:
	kernfs_remove(rdtgrp->kn);
out_free_rgrp:
	kfree(rdtgrp);
out_unlock:
	rdtgroup_kn_unlock(prgrp_kn);
	return ret;
}

static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
{
	kernfs_remove(rgrp->kn);
	free_rmid(rgrp->mon.rmid);
}

/*
 * Create a monitor group under "mon_groups" directory of a control
 * and monitor group (ctrl_mon). This is a resource group
 * to monitor a subset of tasks and cpus in its parent ctrl_mon group.
 */
static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
			      struct kernfs_node *prgrp_kn,
			      const char *name, umode_t mode)
{
	struct rdtgroup *rdtgrp, *prgrp;
	int ret;

	ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTMON_GROUP,
				&rdtgrp);
	if (ret)
		return ret;

	prgrp = rdtgrp->mon.parent;
	rdtgrp->closid = prgrp->closid;

	/*
	 * Add the rdtgrp to the list of rdtgrps the parent
	 * ctrl_mon group has to track.
	 */
	list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);

	rdtgroup_kn_unlock(prgrp_kn);
	return ret;
}

/*
 * These are rdtgroups created under the root directory. Can be used
 * to allocate and monitor resources.
 */
static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
				   struct kernfs_node *prgrp_kn,
				   const char *name, umode_t mode)
{
	struct rdtgroup *rdtgrp;
	struct kernfs_node *kn;
	u32 closid;
	int ret;

	ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTCTRL_GROUP,
				&rdtgrp);
	if (ret)
		return ret;

	kn = rdtgrp->kn;
	ret = closid_alloc();
	if (ret < 0) {
		rdt_last_cmd_puts("Out of CLOSIDs\n");
		goto out_common_fail;
	}
	closid = ret;
	ret = 0;

	rdtgrp->closid = closid;
	ret = rdtgroup_init_alloc(rdtgrp);
	if (ret < 0)
		goto out_id_free;

	list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);

	if (rdt_mon_capable) {
		/*
		 * Create an empty mon_groups directory to hold the subset
		 * of tasks and cpus to monitor.
		 */
		ret = mongroup_create_dir(kn, NULL, "mon_groups", NULL);
		if (ret) {
			rdt_last_cmd_puts("kernfs subdir error\n");
			goto out_del_list;
		}
	}

	goto out_unlock;

out_del_list:
	list_del(&rdtgrp->rdtgroup_list);
out_id_free:
	closid_free(closid);
out_common_fail:
	mkdir_rdt_prepare_clean(rdtgrp);
out_unlock:
	rdtgroup_kn_unlock(prgrp_kn);
	return ret;
}

/*
 * We allow creating mon groups only within a directory called "mon_groups"
 * which is present in every ctrl_mon group. Check if this is a valid
 * "mon_groups" directory.
 *
 * 1. The directory should be named "mon_groups".
 * 2. The mon group itself should "not" be named "mon_groups".
 *   This makes sure "mon_groups" directory always has a ctrl_mon group
 *   as parent.
 */
static bool is_mon_groups(struct kernfs_node *kn, const char *name)
{
	return (!strcmp(kn->name, "mon_groups") &&
		strcmp(name, "mon_groups"));
}

static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
			  umode_t mode)
{
	/* Do not accept '\n' to avoid an unparsable situation. */
	if (strchr(name, '\n'))
		return -EINVAL;

	/*
	 * If the parent directory is the root directory and RDT
	 * allocation is supported, add a control and monitoring
	 * subdirectory
	 */
	if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn)
		return rdtgroup_mkdir_ctrl_mon(parent_kn, parent_kn, name, mode);

	/*
	 * If RDT monitoring is supported and the parent directory is a valid
	 * "mon_groups" directory, add a monitoring subdirectory.
	 */
	if (rdt_mon_capable && is_mon_groups(parent_kn, name))
		return rdtgroup_mkdir_mon(parent_kn, parent_kn->parent, name,
					  mode);

	return -EPERM;
}

static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
			      cpumask_var_t tmpmask)
{
	struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
	int cpu;

	/* Give any tasks back to the parent group */
	rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);

	/* Update per cpu rmid of the moved CPUs first */
	for_each_cpu(cpu, &rdtgrp->cpu_mask)
		per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;
	/*
	 * Update the MSR on moved CPUs and CPUs which have moved
	 * task running on them.
	 */
	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
	update_closid_rmid(tmpmask, NULL);

	rdtgrp->flags = RDT_DELETED;
	free_rmid(rdtgrp->mon.rmid);

	/*
	 * Remove the rdtgrp from the parent ctrl_mon group's list
	 */
	WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
	list_del(&rdtgrp->mon.crdtgrp_list);

	/*
	 * one extra hold on this, will drop when we kfree(rdtgrp)
	 * in rdtgroup_kn_unlock()
	 */
	kernfs_get(kn);
	kernfs_remove(rdtgrp->kn);

	return 0;
}

static int rdtgroup_ctrl_remove(struct kernfs_node *kn,
				struct rdtgroup *rdtgrp)
{
	rdtgrp->flags = RDT_DELETED;
	list_del(&rdtgrp->rdtgroup_list);

	/*
	 * one extra hold on this, will drop when we kfree(rdtgrp)
	 * in rdtgroup_kn_unlock()
	 */
	kernfs_get(kn);
	kernfs_remove(rdtgrp->kn);
	return 0;
}

static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
			       cpumask_var_t tmpmask)
{
	int cpu;

	/* Give any tasks back to the default group */
	rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);

	/* Give any CPUs back to the default group */
	cpumask_or(&rdtgroup_default.cpu_mask,
		   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

	/* Update per cpu closid and rmid of the moved CPUs first */
	for_each_cpu(cpu, &rdtgrp->cpu_mask) {
		per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
		per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
	}

	/*
	 * Update the MSR on moved CPUs and CPUs which have moved
	 * task running on them.
	 */
	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
	update_closid_rmid(tmpmask, NULL);

	closid_free(rdtgrp->closid);
	free_rmid(rdtgrp->mon.rmid);

	/*
	 * Free all the child monitor group rmids.
	 */
	free_all_child_rdtgrp(rdtgrp);

	rdtgroup_ctrl_remove(kn, rdtgrp);

	return 0;
}

static int rdtgroup_rmdir(struct kernfs_node *kn)
{
	struct kernfs_node *parent_kn = kn->parent;
	struct rdtgroup *rdtgrp;
	cpumask_var_t tmpmask;
	int ret = 0;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	rdtgrp = rdtgroup_kn_lock_live(kn);
	if (!rdtgrp) {
		ret = -EPERM;
		goto out;
	}

	/*
	 * If the rdtgroup is a ctrl_mon group and parent directory
	 * is the root directory, remove the ctrl_mon group.
	 *
	 * If the rdtgroup is a mon group and parent directory
	 * is a valid "mon_groups" directory, remove the mon group.
	 */
	if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			ret = rdtgroup_ctrl_remove(kn, rdtgrp);
		} else {
			ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
		}
	} else if (rdtgrp->type == RDTMON_GROUP &&
		   is_mon_groups(parent_kn, kn->name)) {
		ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask);
	} else {
		ret = -EPERM;
	}

out:
	rdtgroup_kn_unlock(kn);
	free_cpumask_var(tmpmask);
	return ret;
}

static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
{
	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
		seq_puts(seq, ",cdp");

	if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
		seq_puts(seq, ",cdpl2");

	if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA]))
		seq_puts(seq, ",mba_MBps");

	return 0;
}

static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
	.mkdir		= rdtgroup_mkdir,
	.rmdir		= rdtgroup_rmdir,
	.show_options	= rdtgroup_show_options,
};

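/*
 * Illustrative only (not part of the original source): with L3 CDP enabled,
 * the extra options emitted by rdtgroup_show_options() appear in
 * /proc/mounts as something like:
 *
 *	resctrl /sys/fs/resctrl resctrl rw,cdp 0 0
 */
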
static int __init rdtgroup_setup_root(void)
{
	int ret;

	rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
				      KERNFS_ROOT_CREATE_DEACTIVATED |
				      KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
				      &rdtgroup_default);
	if (IS_ERR(rdt_root))
		return PTR_ERR(rdt_root);

	mutex_lock(&rdtgroup_mutex);

	rdtgroup_default.closid = 0;
	rdtgroup_default.mon.rmid = 0;
	rdtgroup_default.type = RDTCTRL_GROUP;
	INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);

	list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);

	ret = rdtgroup_add_files(rdt_root->kn, RF_CTRL_BASE);
	if (ret) {
		kernfs_destroy_root(rdt_root);
		goto out;
	}

	rdtgroup_default.kn = rdt_root->kn;
	kernfs_activate(rdtgroup_default.kn);

out:
	mutex_unlock(&rdtgroup_mutex);

	return ret;
}

/*
 * rdtgroup_init - rdtgroup initialization
 *
 * Set up the resctrl filesystem: set up the root, create the mount point,
 * register the rdtgroup filesystem, and initialize files under the root
 * directory.
 *
 * Return: 0 on success or -errno
 */
int __init rdtgroup_init(void)
{
	int ret = 0;

	seq_buf_init(&last_cmd_status, last_cmd_status_buf,
		     sizeof(last_cmd_status_buf));

	ret = rdtgroup_setup_root();
	if (ret)
		return ret;

	ret = sysfs_create_mount_point(fs_kobj, "resctrl");
	if (ret)
		goto cleanup_root;

	ret = register_filesystem(&rdt_fs_type);
	if (ret)
		goto cleanup_mountpoint;

	/*
	 * Adding the resctrl debugfs directory here may not be ideal since
	 * it would let the resctrl debugfs directory appear on the debugfs
	 * filesystem before the resctrl filesystem is mounted.
	 * It may also be ok since that would enable debugging of RDT before
	 * resctrl is mounted.
	 * The reason why the debugfs directory is created here and not in
	 * rdt_mount() is because rdt_mount() takes rdtgroup_mutex and
	 * during the debugfs directory creation also &sb->s_type->i_mutex_key
	 * (the lockdep class of inode->i_rwsem). Other filesystem
	 * interactions (e.g. SyS_getdents) have the lock ordering:
	 * &sb->s_type->i_mutex_key --> &mm->mmap_sem
	 * During mmap(), called with &mm->mmap_sem, the rdtgroup_mutex
	 * is taken, thus creating the dependency:
	 * &mm->mmap_sem --> rdtgroup_mutex, which together with the other
	 * two orderings would close a cycle.
	 * By creating the debugfs directory here we avoid a dependency
	 * that may cause deadlock (even though file operations cannot
	 * occur until the filesystem is mounted, but I do not know how to
	 * tell lockdep that).
	 */
	debugfs_resctrl = debugfs_create_dir("resctrl", NULL);

	return 0;

cleanup_mountpoint:
	sysfs_remove_mount_point(fs_kobj, "resctrl");
cleanup_root:
	kernfs_destroy_root(rdt_root);

	return ret;
}

void __exit rdtgroup_exit(void)
{
	debugfs_remove_recursive(debugfs_resctrl);
	unregister_filesystem(&rdt_fs_type);
	sysfs_remove_mount_point(fs_kobj, "resctrl");
	kernfs_destroy_root(rdt_root);
}