#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
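
/* Per-ttm_backend state for the SGDMA "fake GART": a table of DMA bus
 * addresses for a buffer's backing pages, plus a record of where in the
 * ctxdma page table those pages were bound.
 */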
struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;
	unsigned nr_pages;

	unsigned pte_start;
	bool bound;
};
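
/* ttm_backend_func.populate: DMA-map each backing page through the PCI
 * layer and remember the bus addresses for a later bind().
 */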
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);

	if (nvbe->pages)
		return -EINVAL;

	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	if (!nvbe->pages)
		return -ENOMEM;

	nvbe->nr_pages = 0;
	while (num_pages--) {
		nvbe->pages[nvbe->nr_pages] =
			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev,
					  nvbe->pages[nvbe->nr_pages])) {
			be->func->clear(be);
			return -EFAULT;
		}

		nvbe->nr_pages++;
	}

	return 0;
}
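
/* ttm_backend_func.clear: undo populate(). Unbinds first if the pages
 * are still mapped into the GART, then DMA-unmaps every page.
 */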
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev;

	if (nvbe && nvbe->pages) {
		dev = nvbe->dev;
		NV_DEBUG(dev, "\n");

		if (nvbe->bound)
			be->func->unbind(be);

		while (nvbe->nr_pages--) {
			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
		kfree(nvbe->pages);
		nvbe->pages = NULL;
		nvbe->nr_pages = 0;
	}
}
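
/* Convert a byte offset into the GART aperture to a PTE index in the
 * ctxdma object. Pre-NV50, the page table starts after a two-word
 * ctxdma header; on NV50, each page table entry is two words wide.
 */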
static inline unsigned
nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);

	if (dev_priv->card_type < NV_50)
		return pte + 2;

	return pte << 1;
}
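
/* ttm_backend_func.bind: write the previously mapped bus addresses into
 * the ctxdma page table, one entry per 4KiB ctxdma page (two words per
 * entry on NV50), then poke 0x100c80, which appears to be the NV50
 * VM/TLB flush register.
 */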
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);

	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
	pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
	nvbe->pte_start = pte;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];
		uint32_t offset_l = lower_32_bits(dma_offset);
		uint32_t offset_h = upper_32_bits(dma_offset);

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50)
				nv_wo32(dev, gpuobj, pte++, offset_l | 3);
			else {
				nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
				nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.finish_access(nvbe->dev);

	if (dev_priv->card_type == NV_50) {
		nv_wr32(dev, 0x100c80, 0x00050001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
				 nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}

		nv_wr32(dev, 0x100c80, 0x00000001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
				 nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}
	}

	nvbe->bound = true;
	return 0;
}
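
/* ttm_backend_func.unbind: point every PTE this object occupied back at
 * the dummy scratch page, so the aperture never maps freed memory.
 */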
static int
nouveau_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
	pte = nvbe->pte_start;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50)
				nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
			else {
				nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
				nv_wo32(dev, gpuobj, pte++, 0x00000000);
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.finish_access(nvbe->dev);

	nvbe->bound = false;
	return 0;
}
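
/* ttm_backend_func.destroy: release any remaining page mappings, then
 * free the backend itself.
 */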
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (be) {
		NV_DEBUG(nvbe->dev, "\n");

		if (nvbe) {
			if (nvbe->pages)
				be->func->clear(be);
			kfree(nvbe);
		}
	}
}
static struct ttm_backend_func nouveau_sgdma_backend = {
	.populate	= nouveau_sgdma_populate,
	.clear		= nouveau_sgdma_clear,
	.bind		= nouveau_sgdma_bind,
	.unbind		= nouveau_sgdma_unbind,
	.destroy	= nouveau_sgdma_destroy
};
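
/* Allocate a ttm_backend instance for a buffer object. Only valid once
 * nouveau_sgdma_init() has created the shared ctxdma page table.
 */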
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	if (!dev_priv->gart_info.sg_ctxdma)
		return NULL;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;

	nvbe->backend.func = &nouveau_sgdma_backend;

	return &nvbe->backend;
}
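
/* Build the shared GART page table: a 64MiB ctxdma on pre-NV50 cards
 * (one 32-bit PTE per 4KiB page, after an 8-byte ctxdma header), or a
 * full 512MiB VM page table with 8-byte PTEs on NV50. Every entry
 * initially points at a single locked dummy page.
 */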
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	uint32_t aper_size, obj_size;
	int i, ret;

	if (dev_priv->card_type < NV_50) {
		aper_size = (64 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
		obj_size += 8; /* ctxdma header */
	} else {
		/* 1 entire VM page table */
		aper_size = (512 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
	}

	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
				 NVOBJ_FLAG_ALLOW_NO_REFS |
				 NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.sg_dummy_page =
		alloc_page(GFP_KERNEL|__GFP_DMA32);
	set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
	dev_priv->gart_info.sg_dummy_bus =
		pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

	dev_priv->engine.instmem.prepare_access(dev, true);
	if (dev_priv->card_type < NV_50) {
		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
		 * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
		 * on those cards? */
		nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
					(1 << 12) /* PT present */ |
					(0 << 13) /* PT *not* linear */ |
					(NV_DMA_ACCESS_RW << 14) |
					(NV_DMA_TARGET_PCI << 16));
		nv_wo32(dev, gpuobj, 1, aper_size - 1);
		for (i = 2; i < 2 + (aper_size >> 12); i++) {
			nv_wo32(dev, gpuobj, i,
				dev_priv->gart_info.sg_dummy_bus | 3);
		}
	} else {
		for (i = 0; i < obj_size; i += 8) {
			nv_wo32(dev, gpuobj, (i+0)/4,
				dev_priv->gart_info.sg_dummy_bus | 0x21);
			nv_wo32(dev, gpuobj, (i+4)/4, 0);
		}
	}
	dev_priv->engine.instmem.finish_access(dev);

	dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
	dev_priv->gart_info.aper_base = 0;
	dev_priv->gart_info.aper_size = aper_size;
	dev_priv->gart_info.sg_ctxdma = gpuobj;
	return 0;
}
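
/* Tear down the SGDMA state: unmap and free the dummy page, then drop
 * the ctxdma page table object.
 */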
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->gart_info.sg_dummy_page) {
		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
			       NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		unlock_page(dev_priv->gart_info.sg_dummy_page);
		__free_page(dev_priv->gart_info.sg_dummy_page);
		dev_priv->gart_info.sg_dummy_page = NULL;
		dev_priv->gart_info.sg_dummy_bus = 0;
	}

	nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
}
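
/* Look up the bus address currently mapped at a GART offset by reading
 * the PTE back out of the ctxdma (pre-NV50 only; the PTE sits at index
 * pte + 2, past the two-word ctxdma header).
 */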
int
nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int pte;

	pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
	if (dev_priv->card_type < NV_50) {
		instmem->prepare_access(dev, false);
		*page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
		instmem->finish_access(dev);
		return 0;
	}

	NV_ERROR(dev, "Unimplemented on NV50\n");
	return -EINVAL;
}