// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2018 Intel Corporation.
 *
 * Authors: Gayatri Kammela <gayatri.kammela@intel.com>
 *          Sohil Mehta <sohil.mehta@intel.com>
 *          Jacob Pan <jacob.jun.pan@linux.intel.com>
 */

#include <linux/debugfs.h>
#include <linux/dmar.h>
#include <linux/intel-iommu.h>
#include <linux/pci.h>

#include <asm/irq_remapping.h>

#include "intel-pasid.h"

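/*
 * Cursor for the root/context/PASID table walk below; one instance lives
 * on the stack of ctx_tbl_walk() and is shared via seq_file::private.
 */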
struct tbl_walk {
	u16 bus;
	u16 devfn;
	u32 pasid;
	struct root_entry *rt_entry;
	struct context_entry *ctx_entry;
	struct pasid_entry *pasid_tbl_entry;
};

struct iommu_regset {
	int offset;
	const char *regs;
};

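/*
 * Pair a DMAR register's MMIO offset (DMAR_<reg>_REG) with its printable
 * name so iommu_regset_show() can print both without a second list.
 */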
#define IOMMU_REGSET_ENTRY(_reg_)					\
	{ DMAR_##_reg_##_REG, __stringify(_reg_) }
static const struct iommu_regset iommu_regs[] = {
	IOMMU_REGSET_ENTRY(VER),
	IOMMU_REGSET_ENTRY(CAP),
	IOMMU_REGSET_ENTRY(ECAP),
	IOMMU_REGSET_ENTRY(GCMD),
	IOMMU_REGSET_ENTRY(GSTS),
	IOMMU_REGSET_ENTRY(RTADDR),
	IOMMU_REGSET_ENTRY(CCMD),
	IOMMU_REGSET_ENTRY(FSTS),
	IOMMU_REGSET_ENTRY(FECTL),
	IOMMU_REGSET_ENTRY(FEDATA),
	IOMMU_REGSET_ENTRY(FEADDR),
	IOMMU_REGSET_ENTRY(FEUADDR),
	IOMMU_REGSET_ENTRY(AFLOG),
	IOMMU_REGSET_ENTRY(PMEN),
	IOMMU_REGSET_ENTRY(PLMBASE),
	IOMMU_REGSET_ENTRY(PLMLIMIT),
	IOMMU_REGSET_ENTRY(PHMBASE),
	IOMMU_REGSET_ENTRY(PHMLIMIT),
	IOMMU_REGSET_ENTRY(IQH),
	IOMMU_REGSET_ENTRY(IQT),
	IOMMU_REGSET_ENTRY(IQA),
	IOMMU_REGSET_ENTRY(ICS),
	IOMMU_REGSET_ENTRY(IRTA),
	IOMMU_REGSET_ENTRY(PQH),
	IOMMU_REGSET_ENTRY(PQT),
	IOMMU_REGSET_ENTRY(PQA),
	IOMMU_REGSET_ENTRY(PRS),
	IOMMU_REGSET_ENTRY(PECTL),
	IOMMU_REGSET_ENTRY(PEDATA),
	IOMMU_REGSET_ENTRY(PEADDR),
	IOMMU_REGSET_ENTRY(PEUADDR),
	IOMMU_REGSET_ENTRY(MTRRCAP),
	IOMMU_REGSET_ENTRY(MTRRDEF),
	IOMMU_REGSET_ENTRY(MTRR_FIX64K_00000),
	IOMMU_REGSET_ENTRY(MTRR_FIX16K_80000),
	IOMMU_REGSET_ENTRY(MTRR_FIX16K_A0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_C0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_C8000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_D0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_D8000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_E0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_E8000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_F0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_F8000),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE0),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK0),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE1),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK1),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE2),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK2),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE3),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK3),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE4),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK4),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE5),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK5),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE6),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK6),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE7),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK7),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE8),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK8),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE9),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK9),
	IOMMU_REGSET_ENTRY(VCCAP),
	IOMMU_REGSET_ENTRY(VCMD),
	IOMMU_REGSET_ENTRY(VCRSP),
};

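/*
 * Dump the raw contents of the remapping hardware registers listed above
 * for every active IOMMU, one table per unit (debugfs file "iommu_regset").
 */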
static int iommu_regset_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long flag;
	int i, ret = 0;
	u64 value;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!drhd->reg_base_addr) {
			seq_puts(m, "IOMMU: Invalid base address\n");
			ret = -EINVAL;
			goto out;
		}

		seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
			   iommu->name, drhd->reg_base_addr);
		seq_puts(m, "Name\t\t\tOffset\t\tContents\n");
		/*
		 * Publish the contents of the 64-bit hardware registers
		 * by adding the offset to the pointer (virtual address).
		 */
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
		for (i = 0; i < ARRAY_SIZE(iommu_regs); i++) {
			value = dmar_readq(iommu->reg + iommu_regs[i].offset);
			seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
				   iommu_regs[i].regs, iommu_regs[i].offset,
				   value);
		}
		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
		seq_putc(m, '\n');
	}
out:
	rcu_read_unlock();

	return ret;
}
DEFINE_SHOW_ATTRIBUTE(iommu_regset);

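/*
 * Print one row of the dmar_translation_struct dump: B.D.F, the root and
 * context entries, and (in scalable mode) the PASID plus its table entry.
 */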
static inline void print_tbl_walk(struct seq_file *m)
{
	struct tbl_walk *tbl_wlk = m->private;

	seq_printf(m, "%02x:%02x.%x\t0x%016llx:0x%016llx\t0x%016llx:0x%016llx\t",
		   tbl_wlk->bus, PCI_SLOT(tbl_wlk->devfn),
		   PCI_FUNC(tbl_wlk->devfn), tbl_wlk->rt_entry->hi,
		   tbl_wlk->rt_entry->lo, tbl_wlk->ctx_entry->hi,
		   tbl_wlk->ctx_entry->lo);

	/*
	 * A legacy mode DMAR doesn't support PASID, hence default it to -1
	 * indicating that it's invalid. Also, default all PASID related fields
	 * to 0.
	 */
	if (!tbl_wlk->pasid_tbl_entry)
		seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n", -1,
			   (u64)0, (u64)0, (u64)0);
	else
		seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n",
			   tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[2],
			   tbl_wlk->pasid_tbl_entry->val[1],
			   tbl_wlk->pasid_tbl_entry->val[0]);
}

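/*
 * Dump the present entries of one PASID table. The PASID value is rebuilt
 * from the directory index and the index within this table.
 */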
static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry,
			   u16 dir_idx)
{
	struct tbl_walk *tbl_wlk = m->private;
	u8 tbl_idx;

	for (tbl_idx = 0; tbl_idx < PASID_TBL_ENTRIES; tbl_idx++) {
		if (pasid_pte_is_present(tbl_entry)) {
			tbl_wlk->pasid_tbl_entry = tbl_entry;
			tbl_wlk->pasid = (dir_idx << PASID_PDE_SHIFT) + tbl_idx;
			print_tbl_walk(m);
		}

		tbl_entry++;
	}
}

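/*
 * Walk a scalable-mode PASID directory and descend into every directory
 * entry that points to a valid PASID table.
 */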
static void pasid_dir_walk(struct seq_file *m, u64 pasid_dir_ptr,
			   u16 pasid_dir_size)
{
	struct pasid_dir_entry *dir_entry = phys_to_virt(pasid_dir_ptr);
	struct pasid_entry *pasid_tbl;
	u16 dir_idx;

	for (dir_idx = 0; dir_idx < pasid_dir_size; dir_idx++) {
		pasid_tbl = get_pasid_table_from_pde(dir_entry);
		if (pasid_tbl)
			pasid_tbl_walk(m, pasid_tbl, dir_idx);

		dir_entry++;
	}
}

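/*
 * Walk the 256 possible device functions on one bus and print each present
 * context entry; in scalable mode, recurse into the PASID directory
 * referenced by the context entry instead.
 */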
static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
{
	struct context_entry *context;
	u16 devfn, pasid_dir_size;
	u64 pasid_dir_ptr;

	for (devfn = 0; devfn < 256; devfn++) {
		struct tbl_walk tbl_wlk = {0};

		/*
		 * A scalable mode root entry points to an upper and a lower
		 * scalable mode context table. Each scalable mode context
		 * table holds 128 context entries, whereas a legacy mode
		 * context table holds 256. So in scalable mode, the context
		 * entries for the first 128 device functions live in the
		 * lower context table and those for the remaining 128 in the
		 * upper one. When devfn > 127, iommu_context_addr()
		 * automatically refers to the upper context table, so the
		 * caller doesn't have to worry about the difference between
		 * scalable mode and non scalable mode.
		 */
		context = iommu_context_addr(iommu, bus, devfn, 0);
		if (!context)
			return;

		if (!context_present(context))
			continue;

		tbl_wlk.bus = bus;
		tbl_wlk.devfn = devfn;
		tbl_wlk.rt_entry = &iommu->root_entry[bus];
		tbl_wlk.ctx_entry = context;
		m->private = &tbl_wlk;

		if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) {
			pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
			pasid_dir_size = get_pasid_dir_size(context);
			pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
			continue;
		}

		print_tbl_walk(m);
	}
}

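/*
 * Walk the root table of one IOMMU and dump the translation structures for
 * every bus, holding iommu->lock so the tables can't change underneath us.
 */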
static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
{
	unsigned long flags;
	u16 bus;

	spin_lock_irqsave(&iommu->lock, flags);
	seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name,
		   (u64)virt_to_phys(iommu->root_entry));
	seq_puts(m, "B.D.F\tRoot_entry\t\t\t\tContext_entry\t\t\t\tPASID\tPASID_table_entry\n");

	/*
	 * No need to check if the root entry is present or not because
	 * iommu_context_addr() performs the same check before returning
	 * the context entry.
	 */
	for (bus = 0; bus < 256; bus++)
		ctx_tbl_walk(m, iommu, bus);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

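/*
 * seq_file show callback for the "dmar_translation_struct" debugfs file:
 * dump the DMA-remapping translation structures of every active IOMMU.
 */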
static int dmar_translation_struct_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		root_tbl_walk(m, iommu);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dmar_translation_struct);

#ifdef CONFIG_IRQ_REMAP
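/*
 * Print the present remapped-format IRTEs; posted-format entries are
 * skipped here and handled by ir_tbl_posted_entry_show() below.
 */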
static void ir_tbl_remap_entry_show(struct seq_file *m,
				    struct intel_iommu *iommu)
{
	struct irte *ri_entry;
	unsigned long flags;
	int idx;

	seq_puts(m, " Entry SrcID   DstID    Vct IRTE_high\t\tIRTE_low\n");

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
		ri_entry = &iommu->ir_table->base[idx];
		if (!ri_entry->present || ri_entry->p_pst)
			continue;

		seq_printf(m, " %-5d %02x:%02x.%01x %08x %02x  %016llx\t%016llx\n",
			   idx, PCI_BUS_NUM(ri_entry->sid),
			   PCI_SLOT(ri_entry->sid), PCI_FUNC(ri_entry->sid),
			   ri_entry->dest_id, ri_entry->vector,
			   ri_entry->high, ri_entry->low);
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
}

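/*
 * Print the present posted-format IRTEs, including the posted interrupt
 * descriptor address that is split across the pda_h/pda_l fields.
 */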
static void ir_tbl_posted_entry_show(struct seq_file *m,
				     struct intel_iommu *iommu)
{
	struct irte *pi_entry;
	unsigned long flags;
	int idx;

	seq_puts(m, " Entry SrcID   PDA_high PDA_low  Vct IRTE_high\t\tIRTE_low\n");

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
		pi_entry = &iommu->ir_table->base[idx];
		if (!pi_entry->present || !pi_entry->p_pst)
			continue;

		seq_printf(m, " %-5d %02x:%02x.%01x %08x %08x %02x  %016llx\t%016llx\n",
			   idx, PCI_BUS_NUM(pi_entry->sid),
			   PCI_SLOT(pi_entry->sid), PCI_FUNC(pi_entry->sid),
			   pi_entry->pda_h, pi_entry->pda_l << 6,
			   pi_entry->vector, pi_entry->high,
			   pi_entry->low);
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
}

/*
 * For active IOMMUs go through the Interrupt remapping
 * table and print valid entries in a table format for
 * Remapped and Posted Interrupts.
 */
static int ir_translation_struct_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	u64 irta;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n",
			   iommu->name);

		if (iommu->ir_table) {
			irta = virt_to_phys(iommu->ir_table->base);
			seq_printf(m, " IR table address:%llx\n", irta);
			ir_tbl_remap_entry_show(m, iommu);
		} else {
			seq_puts(m, "Interrupt Remapping is not enabled\n");
		}
		seq_putc(m, '\n');
	}

	seq_puts(m, "****\n\n");

	for_each_active_iommu(iommu, drhd) {
		if (!cap_pi_support(iommu->cap))
			continue;

		seq_printf(m, "Posted Interrupt supported on IOMMU: %s\n",
			   iommu->name);

		if (iommu->ir_table) {
			irta = virt_to_phys(iommu->ir_table->base);
			seq_printf(m, " IR table address:%llx\n", irta);
			ir_tbl_posted_entry_show(m, iommu);
		} else {
			seq_puts(m, "Interrupt Remapping is not enabled\n");
		}
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ir_translation_struct);
#endif

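/*
 * Create the Intel-specific debugfs entries. With debugfs mounted in the
 * usual place, they typically show up under /sys/kernel/debug/iommu/intel/.
 */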
void __init intel_iommu_debugfs_init(void)
{
	struct dentry *intel_iommu_debug = debugfs_create_dir("intel",
						iommu_debugfs_dir);

	debugfs_create_file("iommu_regset", 0444, intel_iommu_debug, NULL,
			    &iommu_regset_fops);
	debugfs_create_file("dmar_translation_struct", 0444, intel_iommu_debug,
			    NULL, &dmar_translation_struct_fops);
#ifdef CONFIG_IRQ_REMAP
	debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug,
			    NULL, &ir_translation_struct_fops);
#endif
}