#ifndef __NVKM_MMU_H__
#define __NVKM_MMU_H__
#include <core/subdev.h>
/* A region of GPU virtual address space managed by an nvkm_vmm.
 *
 * NOTE(review): head/tree presumably link this VMA into the owning VMM's
 * list/free/root containers — confirm against vmm implementation.
 */
struct nvkm_vma {
	struct list_head head;
	struct rb_node tree;
	u64 addr;	/* Base GPU virtual address of the region. */
	u64 size:50;	/* Region size in bytes (bitfield; packs with flags below). */
	bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
	bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
#define NVKM_VMA_PAGE_NONE 7
	u8 page:3; /* Requested page type (index, or NONE for automatic). */
	u8 refd:3; /* Current page type (index, or NONE for unreferenced). */
	bool used:1; /* Region allocated. */
	bool part:1; /* Region was split from an allocated region by map(). */
	bool user:1; /* Region user-allocated. */
	bool busy:1; /* Region busy (for temporarily preventing user access). */
	struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
	struct nvkm_tags *tags; /* Compression tag reference. */
};
/* A GPU virtual address space (VMM): page tables plus VA allocator state. */
struct nvkm_vmm {
	const struct nvkm_vmm_func *func;	/* HW-specific operations. */
	struct nvkm_mmu *mmu;			/* Owning MMU subdevice. */
	const char *name;
	u32 debug;				/* Debug message level. */
	struct kref kref;			/* Refcount (nvkm_vmm_ref/nvkm_vmm_unref). */
	struct mutex mutex;

	u64 start;	/* First manageable address. */
	u64 limit;	/* Upper bound of the address space. */

	struct nvkm_vmm_pt *pd;		/* Root page directory. */
	struct list_head join;		/* NOTE(review): presumably instances joined
					 * via nvkm_vmm_join() — confirm. */

	/* VA allocator bookkeeping (see also nvkm_vma.head/tree). */
	struct list_head list;
	struct rb_root free;
	struct rb_root root;

	bool bootstrapped;		/* Set once nvkm_vmm_boot() has run. */
	atomic_t engref[NVKM_SUBDEV_NR]; /* Per-subdevice reference counts. */

	/* Scratch "null" page: DMA address and its CPU mapping. */
	dma_addr_t null;
	void *nullp;
};
/* VMM lifetime, bootstrap, instance attach, and VA allocation entry points. */
int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
		 struct lock_class_key *, const char *name, struct nvkm_vmm **);
struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
void nvkm_vmm_unref(struct nvkm_vmm **);
int nvkm_vmm_boot(struct nvkm_vmm *);
int nvkm_vmm_join(struct nvkm_vmm *, struct nvkm_memory *inst);
void nvkm_vmm_part(struct nvkm_vmm *, struct nvkm_memory *inst);
int nvkm_vmm_get(struct nvkm_vmm *, u8 page, u64 size, struct nvkm_vma **);
void nvkm_vmm_put(struct nvkm_vmm *, struct nvkm_vma **);
/* Arguments/state for mapping memory into a VMA (see nvkm_vmm_map()).
 *
 * NOTE(review): mem/sgl/dma appear to be alternative backing descriptions
 * (VRAM nodes, scatterlist, or a DMA address array) — confirm which is
 * selected by the memory type at map time.
 */
struct nvkm_vmm_map {
	struct nvkm_memory *memory;	/* Memory object being mapped. */
	u64 offset;			/* Offset into the memory object. */

	struct nvkm_mm_node *mem;
	struct scatterlist *sgl;
	dma_addr_t *dma;
	u64 off;

	const struct nvkm_vmm_page *page; /* Page size/type used for the map. */

	struct nvkm_tags *tags;		/* Compression tags, if any. */
	u64 next;
	u64 type;
	u64 ctag;
};
int nvkm_vmm_map(struct nvkm_vmm *, struct nvkm_vma *, void *argv, u32 argc,
		 struct nvkm_vmm_map *);
void nvkm_vmm_unmap(struct nvkm_vmm *, struct nvkm_vma *);

/* Look up user-facing memory/VMM objects by client handle. */
struct nvkm_memory *nvkm_umem_search(struct nvkm_client *, u64);
struct nvkm_vmm *nvkm_uvmm_search(struct nvkm_client *, u64 handle);
/* The MMU subdevice: memory heaps, memory-type table, and the bar/kernel VMM. */
struct nvkm_mmu {
	const struct nvkm_mmu_func *func;	/* HW-specific operations. */
	struct nvkm_subdev subdev;

	u8 dma_bits;	/* Width of DMA addresses supported by the HW. */

	/* Physical memory heaps advertised to clients. */
	int heap_nr;
	struct {
#define NVKM_MEM_VRAM 0x01
#define NVKM_MEM_HOST 0x02
#define NVKM_MEM_COMP 0x04
#define NVKM_MEM_DISP 0x08
		u8 type;	/* NVKM_MEM_* location/capability flags. */
		u64 size;
	} heap[4];

	/* Memory types: heap index plus attribute flags. */
	int type_nr;
	struct {
#define NVKM_MEM_KIND 0x10
#define NVKM_MEM_MAPPABLE 0x20
#define NVKM_MEM_COHERENT 0x40
#define NVKM_MEM_UNCACHED 0x80
		u8 type;	/* NVKM_MEM_* flags (heap flags | attributes). */
		u8 heap;	/* Index into heap[] above. */
	} type[16];

	struct nvkm_vmm *vmm;	/* NOTE(review): presumably the BAR/kernel VMM — confirm. */

	/* Page-table caches: ptc/ptp each pair a lock with an object list. */
	struct {
		struct mutex mutex;
		struct list_head list;
	} ptc, ptp;

	struct nvkm_device_oclass user;	/* User-visible object class. */
};
/* Per-chipset MMU constructors; each fills *pmmu for the given device. */
int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gm200_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
#endif