]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
drm/nouveau/mmu/gf100: implement vmm on top of new base
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / nouveau / nvkm / subdev / mmu / vmm.h
1 #ifndef __NVKM_VMM_H__
2 #define __NVKM_VMM_H__
3 #include "priv.h"
4 #include <core/memory.h>
5
/* Per-level page table bookkeeping for a VMM address-space tree node. */
struct nvkm_vmm_pt {
	/* Some GPUs have a mapping level with dual page tables to
	 * support large and small pages in the same address-range.
	 *
	 * We track the state of both page tables in one place, which
	 * is why there's multiple PT pointers/refcounts here.
	 */
	struct nvkm_mmu_pt *pt[2];
	u32 refs[2];

	/* Page size handled by this PT.
	 *
	 * Tesla backend needs to know this when writing PDEs,
	 * otherwise unnecessary.
	 */
	u8 page;

	/* Entire page table sparse.
	 *
	 * Used to propagate sparseness to child page tables.
	 */
	bool sparse:1;

	/* Tracking for page directories.
	 *
	 * The array is indexed by PDE, and will either point to the
	 * child page table, or indicate the PDE is marked as sparse.
	 */
#define NVKM_VMM_PDE_INVALID(pde) IS_ERR_OR_NULL(pde)
#define NVKM_VMM_PDE_SPARSED(pde) IS_ERR(pde)
#define NVKM_VMM_PDE_SPARSE ERR_PTR(-EBUSY)
	struct nvkm_vmm_pt **pde;

	/* Tracking for dual page tables.
	 *
	 * There's one entry for each LPTE, keeping track of whether
	 * there are valid SPTEs in the same address-range.
	 *
	 * This information is used to manage LPTE state transitions.
	 */
#define NVKM_VMM_PTE_SPARSE 0x80
#define NVKM_VMM_PTE_VALID  0x40
#define NVKM_VMM_PTE_SPTES  0x3f
	u8 pte[];
};
51
/* Per-generation hooks for programming a page-table level.
 *
 * NOTE(review): currently an empty placeholder vtable — backends
 * (e.g. gf100 below) provide instances; members are presumably added
 * as the new VMM code grows.  Confirm against later revisions.
 */
struct nvkm_vmm_desc_func {
};

extern const struct nvkm_vmm_desc_func gf100_vmm_pgd;
extern const struct nvkm_vmm_desc_func gf100_vmm_pgt;
57
/* Static description of one level in a page-table hierarchy. */
struct nvkm_vmm_desc {
	enum {
		PGD,	/* page directory */
		PGT,	/* page table (single) */
		SPT,	/* small-page table (dual-PT level) */
		LPT,	/* large-page table (dual-PT level) */
	} type;
	u8 bits;	/* VMA bits covered by PT. */
	u8 size;	/* Bytes-per-PTE. */
	u32 align;	/* PT address alignment. */
	const struct nvkm_vmm_desc_func *func;
};
70
/* One supported page size: shift, its level descriptors, and
 * capability flags.  The NVKM_VMM_PAGE_* convenience macros encode
 * capabilities positionally as S(parse) V(RAM) H(OST) C(omp), with
 * 'x' marking an absent capability.
 */
struct nvkm_vmm_page {
	u8 shift;	/* log2 of the page size */
	const struct nvkm_vmm_desc *desc;
#define NVKM_VMM_PAGE_SPARSE 0x01
#define NVKM_VMM_PAGE_VRAM   0x02
#define NVKM_VMM_PAGE_HOST   0x04
#define NVKM_VMM_PAGE_COMP   0x08
#define NVKM_VMM_PAGE_Sxxx (NVKM_VMM_PAGE_SPARSE)
#define NVKM_VMM_PAGE_xVxx (NVKM_VMM_PAGE_VRAM)
#define NVKM_VMM_PAGE_SVxx (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_VRAM)
#define NVKM_VMM_PAGE_xxHx (NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_SxHx (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_xVHx (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_SVHx (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_xVxC (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_SVxC (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_xxHC (NVKM_VMM_PAGE_xxHx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_SxHC (NVKM_VMM_PAGE_SxHx | NVKM_VMM_PAGE_COMP)
	u8 type;	/* OR of NVKM_VMM_PAGE_* capability flags */
};
91
/* Per-GPU-generation VMM operations and supported page sizes. */
struct nvkm_vmm_func {
	/* Attach this VMM to a channel instance block. */
	int (*join)(struct nvkm_vmm *, struct nvkm_memory *inst);
	/* Detach this VMM from a channel instance block. */
	void (*part)(struct nvkm_vmm *, struct nvkm_memory *inst);

	u64 page_block;
	/* Supported page sizes; flexible array member, so instances of
	 * this struct must be the last thing at their allocation site.
	 */
	const struct nvkm_vmm_page page[];
};
99
/* Record of one instance block joined to a VMM (list node). */
struct nvkm_vmm_join {
	struct nvkm_memory *inst;
	struct list_head head;	/* entry on the VMM's join list */
};
104
/* Base VMM construction/destruction. */
int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
		  u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
		  const char *name, struct nvkm_vmm **);
int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
		  u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
		  const char *name, struct nvkm_vmm *);
void nvkm_vmm_dtor(struct nvkm_vmm *);

/* Shared backend constructor helpers. */
int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
		  u64, u64, void *, u32, struct lock_class_key *,
		  const char *, struct nvkm_vmm **);

int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
		   struct nvkm_mmu *, u64, u64, void *, u32,
		   struct lock_class_key *, const char *, struct nvkm_vmm **);
int gf100_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
int gf100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
void gf100_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);

/* Per-generation VMM constructors (all share the same signature). */
int nv04_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv41_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv44_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv50_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int g84_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
		struct lock_class_key *, const char *, struct nvkm_vmm **);
int gf100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
		  struct lock_class_key *, const char *, struct nvkm_vmm **);
#endif
136 #endif