/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

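/*
 * TTM memory-type manager backend for VRAM.  Allocation requests are
 * forwarded to the ram allocator of the fb subdev; the manager itself
 * only caches a pointer to that subdev in man->priv.
 */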
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	man->priv = pfb;
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	man->priv = NULL;
	return 0;
}

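/*
 * Unmap and release any GPU virtual address space still attached to a
 * memory node before the underlying memory is freed.
 */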
static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
	if (node->vma[0].node) {
		nouveau_vm_unmap(&node->vma[0]);
		nouveau_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nouveau_vm_unmap(&node->vma[1]);
		nouveau_vm_put(&node->vma[1]);
	}
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	nouveau_mem_node_cleanup(mem->mm_node);
	pfb->ram->put(pfb, (struct nouveau_mem **)&mem->mm_node);
}

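/*
 * Allocate VRAM for a buffer object.  NONCONTIG buffers may be built from
 * page_shift-sized chunks, and -ENOSPC is reported back to TTM as a NULL
 * mm_node with a zero return so that eviction can be attempted.
 */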
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 uint32_t flags,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_mem *node;
	u32 size_nc = 0;
	int ret;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = pfb->ram->get(pfb, mem->num_pages << PAGE_SHIFT,
			    mem->page_alignment << PAGE_SHIFT, size_nc,
			    (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start = node->offset >> PAGE_SHIFT;
	return 0;
}

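/*
 * Dump every node in the VRAM mm to the kernel log.  Offsets and lengths
 * are stored in 4 KiB units, hence the << 12 when printing byte values.
 */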
static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nouveau_fb *pfb = man->priv;
	struct nouveau_mm *mm = &pfb->vram;
	struct nouveau_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&nv_subdev(pfb)->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&nv_subdev(pfb)->mutex);

	printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}

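/* Dispatch table plugged into TTM for the VRAM memory type. */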
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	.init = nouveau_vram_manager_init,
	.takedown = nouveau_vram_manager_fini,
	.get_node = nouveau_vram_manager_new,
	.put_node = nouveau_vram_manager_del,
	.debug = nouveau_vram_manager_debug,
};

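/*
 * TTM memory-type manager backend for the GART aperture on chipsets with
 * a paged GPU MMU.  No address space is managed here; the node merely
 * records the memory type so a virtual address can be assigned when the
 * buffer is actually mapped.
 */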
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nouveau_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 uint32_t flags,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_mem *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	switch (drm->device.info.family) {
	case NV_DEVICE_INFO_V0_TESLA:
		if (drm->device.info.chipset != 0x50)
			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
		break;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
		break;
	default:
		break;
	}

	mem->mm_node = node;
	mem->start = 0;
	return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	.init = nouveau_gart_manager_init,
	.takedown = nouveau_gart_manager_fini,
	.get_node = nouveau_gart_manager_new,
	.put_node = nouveau_gart_manager_del,
	.debug = nouveau_gart_manager_debug,
};

#include <core/subdev/vm/nv04.h>
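/*
 * TTM memory-type manager backend for GART on NV04-class hardware, which
 * lacks a paged MMU: space is instead reserved directly out of the single
 * VM owned by the nv04 vmmgr, so the GPU address is fixed at allocation.
 */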
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_vmmgr *vmm = nvkm_vmmgr(&drm->device);
	struct nv04_vmmgr_priv *priv = (void *)vmm;
	struct nouveau_vm *vm = NULL;
	nouveau_vm_ref(priv->vm, &vm, NULL);
	man->priv = vm;
	return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nouveau_vm *vm = man->priv;
	nouveau_vm_ref(NULL, &vm, NULL);
	man->priv = NULL;
	return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
	struct nouveau_mem *node = mem->mm_node;
	if (node->vma[0].node)
		nouveau_vm_put(&node->vma[0]);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      struct ttm_placement *placement,
		      uint32_t flags,
		      struct ttm_mem_reg *mem)
{
	struct nouveau_mem *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
			     NV_MEM_ACCESS_RW, &node->vma[0]);
	if (ret) {
		kfree(node);
		return ret;
	}

	mem->mm_node = node;
	mem->start = node->vma[0].offset >> PAGE_SHIFT;
	return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	.init = nv04_gart_manager_init,
	.takedown = nv04_gart_manager_fini,
	.get_node = nv04_gart_manager_new,
	.put_node = nv04_gart_manager_del,
	.debug = nv04_gart_manager_debug,
};

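/*
 * mmap() entry point: offsets below DRM_FILE_PAGE_OFFSET belong to the
 * legacy DRM mapping code, everything above it maps a TTM buffer object.
 */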
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

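/*
 * Take references on the global TTM memory-accounting and BO state,
 * creating them on first use; both are dropped again in
 * nouveau_ttm_global_release().
 */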
int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}

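/*
 * Bring up TTM for this device: choose a DMA mask the GPU and the bus can
 * both handle, initialise the BO driver, then size and register the VRAM
 * and GART memory-type managers.
 */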
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	u32 bits;
	int ret;

	bits = nvkm_vmmgr(&drm->device)->dma_bits;
	if (nv_device_is_pci(nvkm_device(&drm->device))) {
		if (drm->agp.stat == ENABLED ||
		    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
			bits = 32;

		ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
		if (ret)
			return ret;

		ret = pci_set_consistent_dma_mask(dev->pdev,
						  DMA_BIT_MASK(bits));
		if (ret)
			pci_set_consistent_dma_mask(dev->pdev,
						    DMA_BIT_MASK(32));
	}

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				 drm->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET,
				 bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = nvkm_fb(&drm->device)->ram->size;
	drm->gem.vram_available -= nvkm_instmem(&drm->device)->reserved;

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			     drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvkm_device(&drm->device), 1),
					 nv_device_resource_len(nvkm_device(&drm->device), 1));

	/* GART init */
	if (drm->agp.stat != ENABLED) {
		drm->gem.gart_available = nvkm_vmmgr(&drm->device)->limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			     drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}

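/* Tear down everything nouveau_ttm_init() set up, in reverse order. */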
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	mutex_lock(&drm->dev->struct_mutex);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
	mutex_unlock(&drm->dev->struct_mutex);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;
}
6ee73861 | 437 | } |