/*
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * GK20A does not have dedicated video memory, and to accurately represent this
 * fact Nouveau will not create a RAM device for it. Therefore its instmem
 * implementation must be done directly on top of system memory, while
 * preserving coherency for read and write operations.
 *
 * Instmem can be allocated through two means:
 * 1) If an IOMMU unit has been probed, the IOMMU API is used to make memory
 *    pages contiguous to the GPU. This is the preferred way.
 * 2) If no IOMMU unit is probed, the DMA API is used to allocate physically
 *    contiguous memory.
 *
 * In both cases CPU reads and writes are performed by creating a write-combined
 * mapping. The GPU L2 cache must thus be flushed/invalidated when required. To
 * be conservative we do this every time we acquire or release an instobj, but
 * ideally L2 management should be handled at a higher level.
 *
 * To improve performance, CPU mappings are not removed upon instobj release.
 * Instead they are placed into an LRU list to be recycled when the mapped space
 * goes beyond a certain threshold. At the moment this limit is 1MB.
 */
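
/*
 * Rough usage sketch, assuming the nvkm_kmap()/nvkm_wo32()/nvkm_done()
 * helpers from <core/memory.h>, which resolve to the acquire/release hooks
 * and the rd32/wr32 pointers set up below:
 *
 *	nvkm_kmap(memory);                // L2 flush + CPU mapping set up
 *	nvkm_wo32(memory, 0x00, 0xcafe);  // routed to gk20a_instobj_wr32()
 *	nvkm_done(memory);                // wmb() + L2 invalidate
 */
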
#include "priv.h"

#include <core/memory.h>
#include <core/mm.h>
#include <core/tegra.h>
#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/mmu.h>

struct gk20a_instobj {
	struct nvkm_memory memory;
	struct nvkm_mem mem;
	struct gk20a_instmem *imem;

	/* CPU mapping */
	u32 *vaddr;
};
#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)

/*
 * Used for objects allocated using the DMA API
 */
struct gk20a_instobj_dma {
	struct gk20a_instobj base;

	dma_addr_t handle;
	struct nvkm_mm_node r;
};
#define gk20a_instobj_dma(p) \
	container_of(gk20a_instobj(p), struct gk20a_instobj_dma, base)

/*
 * Used for objects flattened using the IOMMU API
 */
struct gk20a_instobj_iommu {
	struct gk20a_instobj base;

	/* to link into gk20a_instmem::vaddr_lru */
	struct list_head vaddr_node;
	/* how many clients are using vaddr? */
	u32 use_cpt;

	/* will point to the higher half of pages */
	dma_addr_t *dma_addrs;
	/* array of base.mem->size pages (+ dma_addr_ts) */
	struct page *pages[];
};
#define gk20a_instobj_iommu(p) \
	container_of(gk20a_instobj(p), struct gk20a_instobj_iommu, base)

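/*
 * Note: pages[] and the dma_addrs array above are carved out of a single
 * kzalloc in gk20a_instobj_ctor_iommu(); dma_addrs simply points right past
 * the npages page pointers (see the (void *)(node->pages + npages)
 * assignment there).
 */
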
struct gk20a_instmem {
	struct nvkm_instmem base;

	/* protects vaddr_* and gk20a_instobj::vaddr* */
	struct mutex lock;

	/* CPU mappings LRU */
	unsigned int vaddr_use;
	unsigned int vaddr_max;
	struct list_head vaddr_lru;

	/* Only used if IOMMU is present */
	struct mutex *mm_mutex;
	struct nvkm_mm *mm;
	struct iommu_domain *domain;
	unsigned long iommu_pgshift;
	u16 iommu_bit;

	/* Only used by DMA API */
	unsigned long attrs;
};
#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)

static enum nvkm_memory_target
gk20a_instobj_target(struct nvkm_memory *memory)
{
	return NVKM_MEM_TARGET_NCOH;
}

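/*
 * All instobjs here are mapped with small (4 KiB) pages only; the .page hook
 * reports that granularity as log2(page size), consistent with the << 12
 * shifts used throughout this file.
 */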
static u8
gk20a_instobj_page(struct nvkm_memory *memory)
{
	return 12;
}

static u64
gk20a_instobj_addr(struct nvkm_memory *memory)
{
	return gk20a_instobj(memory)->mem.offset;
}

static u64
gk20a_instobj_size(struct nvkm_memory *memory)
{
	return (u64)gk20a_instobj(memory)->mem.size << 12;
}

/*
 * Recycle the vaddr of obj. Must be called with gk20a_instmem::lock held.
 */
static void
gk20a_instobj_iommu_recycle_vaddr(struct gk20a_instobj_iommu *obj)
{
	struct gk20a_instmem *imem = obj->base.imem;
	/* there should not be any user left... */
	WARN_ON(obj->use_cpt);
	list_del(&obj->vaddr_node);
	vunmap(obj->base.vaddr);
	obj->base.vaddr = NULL;
	imem->vaddr_use -= nvkm_memory_size(&obj->base.memory);
	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use,
		   imem->vaddr_max);
}

/*
 * Must be called while holding gk20a_instmem::lock
 */
static void
gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size)
{
	while (imem->vaddr_use + size > imem->vaddr_max) {
		/* no candidate that can be unmapped, abort... */
		if (list_empty(&imem->vaddr_lru))
			break;

		gk20a_instobj_iommu_recycle_vaddr(
				list_first_entry(&imem->vaddr_lru,
				struct gk20a_instobj_iommu, vaddr_node));
	}
}

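/*
 * Eviction order note: gk20a_instobj_release_iommu() queues idle mappings at
 * the tail of vaddr_lru, and the loop above recycles from the head, so the
 * least-recently released mapping is always unmapped first.
 */
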
static void __iomem *
gk20a_instobj_acquire_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct gk20a_instmem *imem = node->imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;

	nvkm_ltc_flush(ltc);

	return node->vaddr;
}

static void __iomem *
gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
	const u64 size = nvkm_memory_size(memory);

	nvkm_ltc_flush(ltc);

	mutex_lock(&imem->lock);

	if (node->base.vaddr) {
		if (!node->use_cpt) {
			/* remove from LRU list since mapping in use again */
			list_del(&node->vaddr_node);
		}
		goto out;
	}

	/* try to free some address space if we reached the limit */
	gk20a_instmem_vaddr_gc(imem, size);

	/* map the pages */
	node->base.vaddr = vmap(node->pages, size >> PAGE_SHIFT, VM_MAP,
				pgprot_writecombine(PAGE_KERNEL));
	if (!node->base.vaddr) {
		nvkm_error(&imem->base.subdev, "cannot map instobj - "
			   "this is not going to end well...\n");
		goto out;
	}

	imem->vaddr_use += size;
	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
		   imem->vaddr_use, imem->vaddr_max);

out:
	node->use_cpt++;
	mutex_unlock(&imem->lock);

	return node->base.vaddr;
}

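/*
 * The two acquire paths above differ on purpose: the DMA variant returns the
 * permanent kernel mapping created by dma_alloc_attrs() at construction time,
 * while the IOMMU variant vmap()s pages on demand and accounts them against
 * the vaddr_max budget managed by the LRU.
 */
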
static void
gk20a_instobj_release_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct gk20a_instmem *imem = node->imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;

	/* in case we got a write-combined mapping */
	wmb();
	nvkm_ltc_invalidate(ltc);
}

static void
gk20a_instobj_release_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;

	mutex_lock(&imem->lock);

	/* we should at least have one user to release... */
	if (WARN_ON(node->use_cpt == 0))
		goto out;

	/* add unused objs to the LRU list to recycle their mapping */
	if (--node->use_cpt == 0)
		list_add_tail(&node->vaddr_node, &imem->vaddr_lru);

out:
	mutex_unlock(&imem->lock);

	wmb();
	nvkm_ltc_invalidate(ltc);
}

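/*
 * rd32/wr32 below dereference the CPU mapping directly and therefore assume
 * it is live, i.e. they must only be called between acquire() and release().
 */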
static u32
gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	return node->vaddr[offset / 4];
}

static void
gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	node->vaddr[offset / 4] = data;
}

static int
gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
		  struct nvkm_vma *vma, void *argv, u32 argc)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	nvkm_vm_map_at(vma, 0, &node->mem);
	return 0;
}

static void *
gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct device *dev = imem->base.subdev.device->dev;

	if (unlikely(!node->base.vaddr))
		goto out;

	dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr,
		       node->handle, imem->attrs);

out:
	return node;
}

static void *
gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct device *dev = imem->base.subdev.device->dev;
	struct nvkm_mm_node *r = node->base.mem.mem;
	int i;

	if (unlikely(!r))
		goto out;

	mutex_lock(&imem->lock);

	/* recycle the vaddr if it has not been recycled already */
	if (node->base.vaddr)
		gk20a_instobj_iommu_recycle_vaddr(node);

	mutex_unlock(&imem->lock);

	/* clear IOMMU bit to unmap pages */
	r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);

	/* Unmap pages from GPU address space and free them */
	for (i = 0; i < node->base.mem.size; i++) {
		iommu_unmap(imem->domain,
			    (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
		dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(node->pages[i]);
	}

	/* Release area from GPU address space */
	mutex_lock(imem->mm_mutex);
	nvkm_mm_free(imem->mm, &r);
	mutex_unlock(imem->mm_mutex);

out:
	return node;
}

static const struct nvkm_memory_func
gk20a_instobj_func_dma = {
	.dtor = gk20a_instobj_dtor_dma,
	.target = gk20a_instobj_target,
	.page = gk20a_instobj_page,
	.addr = gk20a_instobj_addr,
	.size = gk20a_instobj_size,
	.acquire = gk20a_instobj_acquire_dma,
	.release = gk20a_instobj_release_dma,
	.map = gk20a_instobj_map,
};

static const struct nvkm_memory_func
gk20a_instobj_func_iommu = {
	.dtor = gk20a_instobj_dtor_iommu,
	.target = gk20a_instobj_target,
	.page = gk20a_instobj_page,
	.addr = gk20a_instobj_addr,
	.size = gk20a_instobj_size,
	.acquire = gk20a_instobj_acquire_iommu,
	.release = gk20a_instobj_release_iommu,
	.map = gk20a_instobj_map,
};

static const struct nvkm_memory_ptrs
gk20a_instobj_ptrs = {
	.rd32 = gk20a_instobj_rd32,
	.wr32 = gk20a_instobj_wr32,
};

static int
gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
		       struct gk20a_instobj **_node)
{
	struct gk20a_instobj_dma *node;
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct device *dev = subdev->device->dev;

	if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
		return -ENOMEM;
	*_node = &node->base;

	nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
	node->base.memory.ptrs = &gk20a_instobj_ptrs;

	node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
					   &node->handle, GFP_KERNEL,
					   imem->attrs);
	if (!node->base.vaddr) {
		nvkm_error(subdev, "cannot allocate DMA memory\n");
		return -ENOMEM;
	}

	/* alignment check */
	if (unlikely(node->handle & (align - 1)))
		nvkm_warn(subdev,
			  "memory not aligned as requested: %pad (0x%x)\n",
			  &node->handle, align);

	/* present memory for being mapped using small pages */
	node->r.type = 12;
	node->r.offset = node->handle >> 12;
	node->r.length = (npages << PAGE_SHIFT) >> 12;

	node->base.mem.offset = node->handle;
	node->base.mem.mem = &node->r;
	return 0;
}

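/*
 * About the IOMMU bit used below: addresses with this bit set are resolved
 * through the SMMU, while addresses without it go straight to physical
 * memory (the bit itself comes from tdev->func->iommu_bit). The ctor sets it
 * in the nvkm_mm offset so the GPU accesses this object through the IOMMU;
 * the dtor clears it again before unmapping.
 */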
static int
gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
			 struct gk20a_instobj **_node)
{
	struct gk20a_instobj_iommu *node;
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct device *dev = subdev->device->dev;
	struct nvkm_mm_node *r;
	int ret;
	int i;

	/*
	 * despite their variable size, instmem allocations are small enough
	 * (< 1 page) to be handled by kzalloc
	 */
	if (!(node = kzalloc(sizeof(*node) + ((sizeof(node->pages[0]) +
			     sizeof(*node->dma_addrs)) * npages), GFP_KERNEL)))
		return -ENOMEM;
	*_node = &node->base;
	node->dma_addrs = (void *)(node->pages + npages);

	nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);
	node->base.memory.ptrs = &gk20a_instobj_ptrs;

	/* Allocate backing memory */
	for (i = 0; i < npages; i++) {
		struct page *p = alloc_page(GFP_KERNEL);
		dma_addr_t dma_adr;

		if (p == NULL) {
			ret = -ENOMEM;
			goto free_pages;
		}
		node->pages[i] = p;
		dma_adr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_adr)) {
			nvkm_error(subdev, "DMA mapping error!\n");
			ret = -ENOMEM;
			goto free_pages;
		}
		node->dma_addrs[i] = dma_adr;
	}

	mutex_lock(imem->mm_mutex);
	/* Reserve area from GPU address space */
	ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
			   align >> imem->iommu_pgshift, &r);
	mutex_unlock(imem->mm_mutex);
	if (ret) {
		nvkm_error(subdev, "IOMMU space is full!\n");
		goto free_pages;
	}

	/* Map into GPU address space */
	for (i = 0; i < npages; i++) {
		u32 offset = (r->offset + i) << imem->iommu_pgshift;

		ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
		if (ret < 0) {
			nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);

			while (i-- > 0) {
				offset -= PAGE_SIZE;
				iommu_unmap(imem->domain, offset, PAGE_SIZE);
			}
			goto release_area;
		}
	}

	/* the IOMMU bit signals that an address must be resolved through the IOMMU */
	r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);

	node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
	node->base.mem.mem = r;
	return 0;

release_area:
	mutex_lock(imem->mm_mutex);
	nvkm_mm_free(imem->mm, &r);
	mutex_unlock(imem->mm_mutex);

free_pages:
	for (i = 0; i < npages && node->pages[i] != NULL; i++) {
		dma_addr_t dma_addr = node->dma_addrs[i];

		if (dma_addr)
			dma_unmap_page(dev, dma_addr, PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		__free_page(node->pages[i]);
	}

	return ret;
}

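/*
 * Example of the rounding done below: a request for size 0x600, align 0x100
 * becomes size 0x1000, align 0x1000, i.e. one 4 KiB page, and mem.size is
 * then stored in 4 KiB units (size >> 12 == 1).
 */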
static int
gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
		  struct nvkm_memory **pmemory)
{
	struct gk20a_instmem *imem = gk20a_instmem(base);
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct gk20a_instobj *node = NULL;
	int ret;

	nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
		   imem->domain ? "IOMMU" : "DMA", size, align);

	/* Round size and align to page bounds */
	size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
	align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);

	if (imem->domain)
		ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
					       align, &node);
	else
		ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
					     align, &node);
	*pmemory = node ? &node->memory : NULL;
	if (ret)
		return ret;

	node->imem = imem;

	/* present memory for being mapped using small pages */
	node->mem.size = size >> 12;
	node->mem.memtype = 0;
	node->mem.memory = &node->memory;

	nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
		   size, align, node->mem.offset);

	return 0;
}

static void *
gk20a_instmem_dtor(struct nvkm_instmem *base)
{
	struct gk20a_instmem *imem = gk20a_instmem(base);

	/* perform some sanity checks... */
	if (!list_empty(&imem->vaddr_lru))
		nvkm_warn(&base->subdev, "instobj LRU not empty!\n");

	if (imem->vaddr_use != 0)
		nvkm_warn(&base->subdev, "instobj vmap area not empty! "
			  "0x%x bytes still mapped\n", imem->vaddr_use);

	return imem;
}

static const struct nvkm_instmem_func
gk20a_instmem = {
	.dtor = gk20a_instmem_dtor,
	.memory_new = gk20a_instobj_new,
	.zero = false,
};

int
gk20a_instmem_new(struct nvkm_device *device, int index,
		  struct nvkm_instmem **pimem)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	struct gk20a_instmem *imem;

	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
	mutex_init(&imem->lock);
	*pimem = &imem->base;

	/* do not allow more than 1MB of CPU-mapped instmem */
	imem->vaddr_use = 0;
	imem->vaddr_max = 0x100000;
	INIT_LIST_HEAD(&imem->vaddr_lru);

	if (tdev->iommu.domain) {
		imem->mm_mutex = &tdev->iommu.mutex;
		imem->mm = &tdev->iommu.mm;
		imem->domain = tdev->iommu.domain;
		imem->iommu_pgshift = tdev->iommu.pgshift;
		imem->iommu_bit = tdev->func->iommu_bit;

		nvkm_info(&imem->base.subdev, "using IOMMU\n");
	} else {
		imem->attrs = DMA_ATTR_NON_CONSISTENT |
			      DMA_ATTR_WEAK_ORDERING |
			      DMA_ATTR_WRITE_COMBINE;

		nvkm_info(&imem->base.subdev, "using DMA API\n");
	}

	return 0;
}