#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

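/*
 * Per-backend state for the scatter/gather GART: the TTM backend hook
 * table, the owning device, the DMA address list for the currently
 * populated pages, and bookkeeping for unmap/unbind on teardown.
 */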
struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;
	unsigned nr_pages;
	bool unmap_pages;

	u64 offset;
	bool bound;
};

static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page,
		       dma_addr_t *dma_addrs)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	int i;

	NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);

	nvbe->pages = dma_addrs;
	nvbe->nr_pages = num_pages;
	nvbe->unmap_pages = true;

	/* this code path isn't called and is incorrect anyways */
	if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
		nvbe->unmap_pages = false;
		return 0;
	}

	/* Map each backing page for PCI DMA; on failure, unmap whatever
	 * was already mapped and bail out. */
	for (i = 0; i < num_pages; i++) {
		nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
			nvbe->nr_pages = --i;
			be->func->clear(be);
			return -EFAULT;
		}
	}

	return 0;
}

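/* Undo populate: unbind from the GART if still bound, then unmap the
 * PCI DMA mappings created above. */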
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	if (nvbe->bound)
		be->func->unbind(be);

	if (nvbe->unmap_pages) {
		while (nvbe->nr_pages--) {
			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
		nvbe->unmap_pages = false;
	}

	nvbe->pages = NULL;
}

static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (be) {
		NV_DEBUG(nvbe->dev, "\n");

		if (nvbe) {
			if (nvbe->pages)
				be->func->clear(be);
			kfree(nvbe);
		}
	}
}

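/* NV04-style GART: a DMA-in-memory ctxdma whose page table starts two
 * 32-bit words in.  Each PTE maps one NV_CTXDMA_PAGE_SIZE (4KiB) page,
 * with the low two bits of the entry used as flag bits (the code ORs
 * in 3). */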
static int
nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

	nvbe->offset = mem->start << PAGE_SHIFT;
	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];
		uint32_t offset_l = lower_32_bits(dma_offset);

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
			offset_l += NV_CTXDMA_PAGE_SIZE;
		}
	}

	nvbe->bound = true;
	return 0;
}

static int
nv04_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
	}

	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nv04_sgdma_bind,
	.unbind			= nv04_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

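/* Trigger a VM/GART TLB flush through the 0x100810 register and wait
 * for the hardware to acknowledge it before continuing. */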
static void
nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
	struct drm_device *dev = nvbe->dev;

	nv_wr32(dev, 0x100810, 0x00000022);
	if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
		NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
			 nv_rd32(dev, 0x100810));
	nv_wr32(dev, 0x100810, 0x00000000);
}

static int
nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	dma_addr_t *list = nvbe->pages;
	u32 pte = mem->start << 2;
	u32 cnt = nvbe->nr_pages;

	nvbe->offset = mem->start << PAGE_SHIFT;

	/* one 32-bit PTE per page: DMA address >> 7, low bit set */
	while (cnt--) {
		nv_wo32(pgt, pte, (*list++ >> 7) | 1);
		pte += 4;
	}

	nv41_sgdma_flush(nvbe);
	nvbe->bound = true;
	return 0;
}

static int
nv41_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	u32 pte = (nvbe->offset >> 12) << 2;
	u32 cnt = nvbe->nr_pages;

	while (cnt--) {
		nv_wo32(pgt, pte, 0x00000000);
		pte += 4;
	}

	nv41_sgdma_flush(nvbe);
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv41_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nv41_sgdma_bind,
	.unbind			= nv41_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

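/* Flush the NV44 GART for the range just (un)bound: program the range
 * via 0x100814/0x100808 and wait for the completion bit. */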
static void
nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
	struct drm_device *dev = nvbe->dev;

	nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
	nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
	if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
		NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
			 nv_rd32(dev, 0x100808));
	nv_wr32(dev, 0x100808, 0x00000000);
}

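/* NV44 packs four 27-bit page addresses into each 16-byte group of the
 * page table, so PTEs that don't cover a whole aligned group have to be
 * updated read-modify-write.  Unbound entries (list == NULL) point at
 * the dummy page, since NV44 has no present bit to mark them invalid. */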
static void
nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
{
	struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
	dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
	u32 pte, tmp[4];

	pte   = base >> 2;
	base &= ~0x0000000f;

	tmp[0] = nv_ro32(pgt, base + 0x0);
	tmp[1] = nv_ro32(pgt, base + 0x4);
	tmp[2] = nv_ro32(pgt, base + 0x8);
	tmp[3] = nv_ro32(pgt, base + 0xc);
	while (cnt--) {
		u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
		switch (pte++ & 0x3) {
		case 0:
			tmp[0] &= ~0x07ffffff;
			tmp[0] |= addr;
			break;
		case 1:
			tmp[0] &= ~0xf8000000;
			tmp[0] |= addr << 27;
			tmp[1] &= ~0x003fffff;
			tmp[1] |= addr >> 5;
			break;
		case 2:
			tmp[1] &= ~0xffc00000;
			tmp[1] |= addr << 22;
			tmp[2] &= ~0x0001ffff;
			tmp[2] |= addr >> 10;
			break;
		case 3:
			tmp[2] &= ~0xfffe0000;
			tmp[2] |= addr << 17;
			tmp[3] &= ~0x00000fff;
			tmp[3] |= addr >> 15;
			break;
		}
	}

	if (!list)
		tmp[3] |= 0x40000000;

	nv_wo32(pgt, base + 0x0, tmp[0]);
	nv_wo32(pgt, base + 0x4, tmp[1]);
	nv_wo32(pgt, base + 0x8, tmp[2]);
	nv_wo32(pgt, base + 0xc, tmp[3]);
}

static int
nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	dma_addr_t *list = nvbe->pages;
	u32 pte = mem->start << 2, tmp[4];
	u32 cnt = nvbe->nr_pages;
	int i;

	nvbe->offset = mem->start << PAGE_SHIFT;

	/* partial leading group: read-modify-write via nv44_sgdma_fill() */
	if (pte & 0x0000000c) {
		u32  max = 4 - ((pte >> 2) & 0x3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_sgdma_fill(pgt, list, pte, part);
		pte  += (part << 2);
		list += part;
		cnt  -= part;
	}

	/* whole 4-PTE groups can be written out directly */
	while (cnt >= 4) {
		for (i = 0; i < 4; i++)
			tmp[i] = *list++ >> 12;
		nv_wo32(pgt, pte + 0x0, tmp[0] >>  0 | tmp[1] << 27);
		nv_wo32(pgt, pte + 0x4, tmp[1] >>  5 | tmp[2] << 22);
		nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
		nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
		pte  += 0x10;
		cnt  -= 4;
	}

	/* partial trailing group */
	if (cnt)
		nv44_sgdma_fill(pgt, list, pte, cnt);

	nv44_sgdma_flush(nvbe);
	nvbe->bound = true;
	return 0;
}

static int
nv44_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	u32 pte = (nvbe->offset >> 12) << 2;
	u32 cnt = nvbe->nr_pages;

	/* point unbound PTEs back at the dummy page (list == NULL) */
	if (pte & 0x0000000c) {
		u32  max = 4 - ((pte >> 2) & 0x3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_sgdma_fill(pgt, NULL, pte, part);
		pte  += (part << 2);
		cnt  -= part;
	}

	while (cnt >= 4) {
		nv_wo32(pgt, pte + 0x0, 0x00000000);
		nv_wo32(pgt, pte + 0x4, 0x00000000);
		nv_wo32(pgt, pte + 0x8, 0x00000000);
		nv_wo32(pgt, pte + 0xc, 0x00000000);
		pte  += 0x10;
		cnt  -= 4;
	}

	if (cnt)
		nv44_sgdma_fill(pgt, NULL, pte, cnt);

	nv44_sgdma_flush(nvbe);
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv44_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nv44_sgdma_bind,
	.unbind			= nv44_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

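/* On NV50+ the real page table updates happen in move_notify(); bind
 * and unbind only shuffle the DMA page list onto/off the memory node
 * so it is available there. */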
static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct nouveau_mem *node = mem->mm_node;

	/* noop: bound in move_notify() */
	node->pages = nvbe->pages;
	nvbe->pages = (dma_addr_t *)node;
	nvbe->bound = true;
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;

	/* noop: unbound in move_notify() */
	nvbe->pages = node->pages;
	node->pages = NULL;
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nv50_sgdma_bind,
	.unbind			= nv50_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;
	nvbe->backend.func = dev_priv->gart_info.func;
	return &nvbe->backend;
}

int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	u32 aper_size, align;
	int ret;

	if (dev_priv->card_type >= NV_40 && pci_is_pcie(dev->pdev))
		aper_size = 512 * 1024 * 1024;
	else
		aper_size = 64 * 1024 * 1024;

	/* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
	 * christmas.  The cards before it have them, the cards after
	 * it have them, why is NV44 so unloved?
	 */
	dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
	if (!dev_priv->gart_info.dummy.page)
		return -ENOMEM;

	dev_priv->gart_info.dummy.addr =
		pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
			     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
		NV_ERROR(dev, "error mapping dummy page\n");
		__free_page(dev_priv->gart_info.dummy.page);
		dev_priv->gart_info.dummy.page = NULL;
		return -ENOMEM;
	}

	if (dev_priv->card_type >= NV_50) {
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_HW;
		dev_priv->gart_info.func = &nv50_sgdma_backend;
	} else
	if (0 && pci_is_pcie(dev->pdev) &&
	    dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
		if (nv44_graph_class(dev)) {
			dev_priv->gart_info.func = &nv44_sgdma_backend;
			align = 512 * 1024;
		} else {
			dev_priv->gart_info.func = &nv41_sgdma_backend;
			align = 16;
		}

		ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_HW;
	} else {
		ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				   (1 << 12) /* PT present */ |
				   (0 << 13) /* PT *not* linear */ |
				   (0 << 14) /* RW */ |
				   (2 << 16) /* PCI */);
		nv_wo32(gpuobj, 4, aper_size - 1);

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
		dev_priv->gart_info.func = &nv04_sgdma_backend;
	}

	return 0;
}

void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);

	if (dev_priv->gart_info.dummy.page) {
		pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		__free_page(dev_priv->gart_info.dummy.page);
		dev_priv->gart_info.dummy.page = NULL;
	}
}

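/* Look up the bus address a GART offset currently maps to by reading
 * the PTE back out of the NV04-style ctxdma (not valid on NV50+, hence
 * the BUG_ON). */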
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

	BUG_ON(dev_priv->card_type >= NV_50);

	return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
		(offset & NV_CTXDMA_PAGE_MASK);
}