/*
 * arch/x86/include/asm/intel_rdt.h
 * Intel Resource Director Technology (RDT) — resctrl definitions.
 */
1 #ifndef _ASM_X86_INTEL_RDT_H
2 #define _ASM_X86_INTEL_RDT_H
3
4 #ifdef CONFIG_INTEL_RDT_A
5
6 #include <linux/sched.h>
7 #include <linux/kernfs.h>
8 #include <linux/jump_label.h>
9
10 #include <asm/intel_rdt_common.h>
11
/* MSR addresses of the RDT allocation control interfaces */
#define IA32_L3_QOS_CFG		0xc81
#define IA32_L3_CBM_BASE	0xc90
#define IA32_L2_CBM_BASE	0xd10
#define IA32_MBA_THRTL_BASE	0xd50

/* Bit in IA32_L3_QOS_CFG which enables L3 Code/Data Prioritization */
#define L3_QOS_CDP_ENABLE	0x01ULL
18
/**
 * struct rdtgroup - store rdtgroup's data in resctrl file system.
 * @kn:			kernfs node
 * @rdtgroup_list:	linked list for all rdtgroups
 * @closid:		closid for this rdtgroup
 * @cpu_mask:		CPUs assigned to this rdtgroup
 * @flags:		status bits (RDT_DELETED)
 * @waitcount:		how many cpus expect to find this
 *			group when they acquire rdtgroup_mutex
 */
struct rdtgroup {
	struct kernfs_node	*kn;
	struct list_head	rdtgroup_list;
	int			closid;
	struct cpumask		cpu_mask;
	int			flags;
	atomic_t		waitcount;
};
37
/* rdtgroup.flags */
#define	RDT_DELETED		1

/* rftype.flags */
#define RFTYPE_FLAGS_CPUS_LIST	1

/* List of all resource groups */
extern struct list_head rdt_all_groups;

/* Character widths used when formatting schemata output for display */
extern int max_name_width, max_data_width;

int __init rdtgroup_init(void);
50
/**
 * struct rftype - describe each file in the resctrl file system
 * @name:	File name
 * @mode:	Access mode
 * @kf_ops:	File operations
 * @flags:	File specific RFTYPE_FLAGS_* flags
 * @seq_show:	Show content of the file
 * @write:	Write to the file
 */
struct rftype {
	char			*name;
	umode_t			mode;
	struct kernfs_ops	*kf_ops;
	unsigned long		flags;

	int (*seq_show)(struct kernfs_open_file *of,
			struct seq_file *sf, void *v);
	/*
	 * write() is the generic write callback which maps directly to
	 * kernfs write operation and overrides all other operations.
	 * Maximum write size is determined by ->max_write_len.
	 */
	ssize_t (*write)(struct kernfs_open_file *of,
			 char *buf, size_t nbytes, loff_t off);
};
76
/**
 * struct rdt_domain - group of cpus sharing an RDT resource
 * @list:		all instances of this resource
 * @id:			unique id for this instance
 * @cpu_mask:		which cpus share this resource
 * @ctrl_val:		array of cache or mem ctrl values (indexed by CLOSID)
 * @new_ctrl:		new ctrl value to be loaded
 * @have_new_ctrl:	did user provide new_ctrl for this domain
 */
struct rdt_domain {
	struct list_head	list;
	int			id;
	struct cpumask		cpu_mask;
	u32			*ctrl_val;
	u32			new_ctrl;
	bool			have_new_ctrl;
};
94
/**
 * struct msr_param - set a range of MSRs from a domain
 * @res:	The resource to use
 * @low:	Beginning index from base MSR
 * @high:	End index
 */
struct msr_param {
	struct rdt_resource	*res;
	int			low;
	int			high;
};
106
/**
 * struct rdt_cache - Cache allocation related data
 * @cbm_len:		Length of the cache bit mask
 * @min_cbm_bits:	Minimum number of consecutive bits to be set
 * @cbm_idx_mult:	Multiplier of CBM index
 * @cbm_idx_offset:	Offset of CBM index. CBM index is computed by:
 *			closid * cbm_idx_mult + cbm_idx_offset
 *			in a cache bit mask
 */
struct rdt_cache {
	unsigned int	cbm_len;
	unsigned int	min_cbm_bits;
	unsigned int	cbm_idx_mult;
	unsigned int	cbm_idx_offset;
};
122
/**
 * struct rdt_membw - Memory bandwidth allocation related data
 * @max_delay:		Max throttle delay. Delay is the hardware
 *			representation for memory bandwidth.
 * @min_bw:		Minimum memory bandwidth percentage user can request
 * @bw_gran:		Granularity at which the memory bandwidth is allocated
 * @delay_linear:	True if memory B/W delay is in linear scale
 * @mb_map:		Mapping of memory B/W percentage to memory B/W delay
 */
struct rdt_membw {
	u32		max_delay;
	u32		min_bw;
	u32		bw_gran;
	u32		delay_linear;
	u32		*mb_map;
};
139
/**
 * struct rdt_resource - attributes of an RDT resource
 * @enabled:		Is this feature enabled on this machine
 * @capable:		Is this feature available on this machine
 * @name:		Name to use in "schemata" file
 * @num_closid:		Number of CLOSIDs available
 * @cache_level:	Which cache level defines scope of this resource
 * @default_ctrl:	Specifies default cache cbm or memory B/W percent.
 * @msr_base:		Base MSR address for CBMs
 * @msr_update:		Function pointer to update QOS MSRs
 * @data_width:		Character width of data when displaying
 * @domains:		All domains for this resource
 * @cache:		Cache allocation related data
 * @membw:		Memory bandwidth allocation related data
 * @info_files:		resctrl info files for the resource
 * @nr_info_files:	Number of info files
 * @format_str:		Per resource format string to show domain value
 * @parse_ctrlval:	Per resource function pointer to parse control values
 */
struct rdt_resource {
	bool			enabled;
	bool			capable;
	char			*name;
	int			num_closid;
	int			cache_level;
	u32			default_ctrl;
	unsigned int		msr_base;
	void (*msr_update)	(struct rdt_domain *d, struct msr_param *m,
				 struct rdt_resource *r);
	int			data_width;
	struct list_head	domains;
	struct rdt_cache	cache;
	struct rdt_membw	membw;
	struct rftype		*info_files;
	int			nr_info_files;
	const char		*format_str;
	int (*parse_ctrlval)	(char *buf, struct rdt_resource *r,
				 struct rdt_domain *d);
};
178
179 void rdt_get_cache_infofile(struct rdt_resource *r);
180 void rdt_get_mba_infofile(struct rdt_resource *r);
181 int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d);
182 int parse_bw(char *buf, struct rdt_resource *r, struct rdt_domain *d);
183
184 extern struct mutex rdtgroup_mutex;
185
186 extern struct rdt_resource rdt_resources_all[];
187 extern struct rdtgroup rdtgroup_default;
188 DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
189
190 int __init rdtgroup_init(void);
191
/* Indices into the rdt_resources_all[] array */
enum {
	RDT_RESOURCE_L3,
	RDT_RESOURCE_L3DATA,
	RDT_RESOURCE_L3CODE,
	RDT_RESOURCE_L2,
	RDT_RESOURCE_MBA,

	/* Must be the last */
	RDT_NUM_RESOURCES,
};
202
/*
 * Iterators over rdt_resources_all[], filtered on the capable / enabled
 * flags. NOTE: they end in a bare "if", so the loop body must not be
 * followed by an "else" clause.
 */
#define for_each_capable_rdt_resource(r)				      \
	for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
	     r++)							      \
		if (r->capable)

#define for_each_enabled_rdt_resource(r)				      \
	for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
	     r++)							      \
		if (r->enabled)
212
/* CPUID.(EAX=10H, ECX=ResID=1).EAX */
union cpuid_0x10_1_eax {
	struct {
		/* cache bit mask length field, EAX bits 4:0 */
		unsigned int cbm_len:5;
	} split;
	unsigned int full;
};
220
/* CPUID.(EAX=10H, ECX=ResID=3).EAX */
union cpuid_0x10_3_eax {
	struct {
		/* max memory B/W throttle delay field, EAX bits 11:0 */
		unsigned int max_delay:12;
	} split;
	unsigned int full;
};
228
/* CPUID.(EAX=10H, ECX=ResID).EDX */
union cpuid_0x10_x_edx {
	struct {
		/* maximum COS (CLOSID) value field, EDX bits 15:0 */
		unsigned int cos_max:16;
	} split;
	unsigned int full;
};
236
/* CLOSid assigned to each CPU (fallback when a task has closid == 0) */
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid);

/* IPI callback: apply a struct msr_param update on the local CPU */
void rdt_ctrl_update(void *arg);
/* Lock rdtgroup_mutex and return the live rdtgroup for @kn (or NULL) */
struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
void rdtgroup_kn_unlock(struct kernfs_node *kn);
ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off);
int rdtgroup_schemata_show(struct kernfs_open_file *of,
			   struct seq_file *s, void *v);
246
247 /*
248 * intel_rdt_sched_in() - Writes the task's CLOSid to IA32_PQR_MSR
249 *
250 * Following considerations are made so that this has minimal impact
251 * on scheduler hot path:
252 * - This will stay as no-op unless we are running on an Intel SKU
253 * which supports resource control and we enable by mounting the
254 * resctrl file system.
255 * - Caches the per cpu CLOSid values and does the MSR write only
256 * when a task with a different CLOSid is scheduled in.
257 *
258 * Must be called with preemption disabled.
259 */
260 static inline void intel_rdt_sched_in(void)
261 {
262 if (static_branch_likely(&rdt_enable_key)) {
263 struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
264 int closid;
265
266 /*
267 * If this task has a closid assigned, use it.
268 * Else use the closid assigned to this cpu.
269 */
270 closid = current->closid;
271 if (closid == 0)
272 closid = this_cpu_read(cpu_closid);
273
274 if (closid != state->closid) {
275 state->closid = closid;
276 wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, closid);
277 }
278 }
279 }
280
#else

/* Resource allocation (CONFIG_INTEL_RDT_A) compiled out: no-op stub */
static inline void intel_rdt_sched_in(void) {}

#endif /* CONFIG_INTEL_RDT_A */
#endif /* _ASM_X86_INTEL_RDT_H */