/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define NVKM_VMM_LEVELS_MAX 5
#include "vmm.h"

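/* Reading aid, inferred from the allocation logic below rather than from
 * authoritative documentation: each nvkm_vmm_pt node mirrors one level of
 * the GPU's page-table hierarchy (see the desc->type values PGT/SPT/PGD
 * used in nvkm_vmm_pt_new()).  PGD nodes carry a pde[] array of child
 * pointers; leaf nodes carry per-PTE bookkeeping in a trailing byte array.
 */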
static void
nvkm_vmm_pt_del(struct nvkm_vmm_pt **ppgt)
{
	struct nvkm_vmm_pt *pgt = *ppgt;
	if (pgt) {
		kvfree(pgt->pde);
		kfree(pgt);
		*ppgt = NULL;
	}
}
static struct nvkm_vmm_pt *
nvkm_vmm_pt_new(const struct nvkm_vmm_desc *desc, bool sparse,
		const struct nvkm_vmm_page *page)
{
	const u32 pten = 1 << desc->bits;
	struct nvkm_vmm_pt *pgt;
	u32 lpte = 0;

	if (desc->type > PGT) {
		if (desc->type == SPT) {
			const struct nvkm_vmm_desc *pair = page[-1].desc;
			lpte = pten >> (desc->bits - pair->bits);
		} else {
			lpte = pten;
		}
	}
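	/* Illustrative numbers (hypothetical descriptor layout): a small-page
	 * table with desc->bits == 8 (256 PTEs) paired with a large-page
	 * level of pair->bits == 5 gives lpte = 256 >> (8 - 5) = 32, i.e.
	 * one tracking byte per group of small PTEs covered by a single
	 * large PTE.
	 */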

	if (!(pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL)))
		return NULL;
	pgt->page = page ? page->shift : 0;
	pgt->sparse = sparse;

	if (desc->type == PGD) {
		pgt->pde = kvzalloc(sizeof(*pgt->pde) * pten, GFP_KERNEL);
		if (!pgt->pde) {
			kfree(pgt);
			return NULL;
		}
	}

	return pgt;
}
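/* Teardown mirrors nvkm_vmm_ctor() below: the DMA-coherent null page (if
 * the backend allocated one) is freed first, then the root page table's
 * GPU storage is returned to the MMU's PT cache before the software tree
 * itself is freed.
 */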
void
nvkm_vmm_dtor(struct nvkm_vmm *vmm)
{
	if (vmm->nullp) {
		dma_free_coherent(vmm->mmu->subdev.device->dev, 16 * 1024,
				  vmm->nullp, vmm->null);
	}

	if (vmm->pd) {
		nvkm_mmu_ptc_put(vmm->mmu, true, &vmm->pd->pt[0]);
		nvkm_vmm_pt_del(&vmm->pd);
	}
}
int
nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
	      u32 pd_header, u64 addr, u64 size, struct lock_class_key *key,
	      const char *name, struct nvkm_vmm *vmm)
{
	static struct lock_class_key _key;
	const struct nvkm_vmm_page *page = func->page;
	const struct nvkm_vmm_desc *desc;
	int levels, bits = 0;

	vmm->func = func;
	vmm->mmu = mmu;
	vmm->name = name;
	kref_init(&vmm->kref);

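	/* A caller-supplied lock_class_key gives lockdep a distinct class
	 * for this VMM's mutex (useful when VMM locks nest); otherwise all
	 * VMMs share the static fallback key above.
	 */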
	__mutex_init(&vmm->mutex, "&vmm->mutex", key ? key : &_key);

	/* Locate the smallest page size supported by the backend; it will
	 * have the deepest nesting of page tables.
	 */
	while (page[1].shift)
		page++;
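	/* For example (shifts are hypothetical): if func->page lists entries
	 * with shifts { 17, 12, 0 }, the walk stops at the 12-bit (4KiB)
	 * entry, whose desc chain describes the deepest table hierarchy.
	 */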

	/* Locate the structure that describes the layout of the top-level
	 * page table, and determine the number of valid bits in a virtual
	 * address.
	 */
	for (levels = 0, desc = page->desc; desc->bits; desc++, levels++)
		bits += desc->bits;
	bits += page->shift;
	desc--;
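	/* Worked example with hypothetical descriptor widths: bits of
	 * { 9, 9, 9, 2 } from leaf to root over 12-bit pages give
	 * levels = 4 and bits = 9 + 9 + 9 + 2 + 12 = 41, i.e. a 41-bit
	 * virtual address space; desc is stepped back to the root-level
	 * (last valid) descriptor for the PD allocation below.
	 */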
	if (WARN_ON(levels > NVKM_VMM_LEVELS_MAX))
		return -EINVAL;

	vmm->start = addr;
	vmm->limit = size ? (addr + size) : (1ULL << bits);
	if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits))
		return -EINVAL;

	/* Allocate top-level page table. */
	vmm->pd = nvkm_vmm_pt_new(desc, false, NULL);
	if (!vmm->pd)
		return -ENOMEM;
	vmm->pd->refs[0] = 1;
	INIT_LIST_HEAD(&vmm->join);

	/* ... and the GPU storage for it, except on Tesla-class GPUs that
	 * have the PD embedded in the instance structure.
	 */
	if (desc->size && mmu->func->vmm.global) {
		const u32 size = pd_header + desc->size * (1 << desc->bits);
		vmm->pd->pt[0] = nvkm_mmu_ptc_get(mmu, size, desc->align, true);
		if (!vmm->pd->pt[0])
			return -ENOMEM;
	}
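	/* Sizing sketch (hypothetical values): with pd_header == 0,
	 * desc->size == 8 bytes per entry and desc->bits == 11, the root
	 * table occupies 8 * 2048 = 16KiB of GPU memory.
	 */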

	return 0;
}
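/* Convenience wrapper used by the per-GPU backends: allocate a zeroed
 * nvkm_vmm and initialise it via nvkm_vmm_ctor() above.
 */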
int
nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
	      u32 hdr, u64 addr, u64 size, struct lock_class_key *key,
	      const char *name, struct nvkm_vmm **pvmm)
{
	if (!(*pvmm = kzalloc(sizeof(**pvmm), GFP_KERNEL)))
		return -ENOMEM;
	return nvkm_vmm_ctor(func, mmu, hdr, addr, size, key, name, *pvmm);
}