/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nv50_display.h"

#include <core/gpuobj.h>

#include <subdev/timer.h>
#include <subdev/fb.h>

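/* The rd32/wr32 hooks below service the faked-up nouveau_object created in
 * nv50_evo_channel_new(), which smuggles the ioremap()'d pointer to the
 * channel's PDISPLAY USER area through the otherwise-unused ofuncs->rd08
 * slot.  These accessors recover that pointer to reach the channel's user
 * registers.
 */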
static u32
nv50_evo_rd32(struct nouveau_object *object, u32 addr)
{
	void __iomem *iomem = object->oclass->ofuncs->rd08;
	return ioread32_native(iomem + addr);
}

static void
nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data)
{
	void __iomem *iomem = object->oclass->ofuncs->rd08;
	iowrite32_native(data, iomem + addr);
}

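/* Undo nv50_evo_channel_new(): release the push buffer, unmap the USER
 * area hidden in ofuncs->rd08, and free the faked-up object state.
 */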
static void
nv50_evo_channel_del(struct nouveau_channel **pevo)
{
	struct nouveau_channel *evo = *pevo;

	if (!evo)
		return;
	*pevo = NULL;

	nouveau_bo_unmap(evo->push.buffer);
	nouveau_bo_ref(NULL, &evo->push.buffer);

	if (evo->object) {
		iounmap(evo->object->oclass->ofuncs->rd08);
		kfree(evo->object->oclass->ofuncs);
		kfree(evo->object->oclass);
		kfree(evo->object);
	}

	kfree(evo);
}

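/* Write a DMA object covering [base, base + size) into the display's
 * instance memory, and add a hash-table entry binding it to 'handle' so
 * EVO methods can reference it.  The dmao/hash cursors advance so that
 * successive objects pack one after another.
 */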
int
nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
		    u64 base, u64 size, struct nouveau_gpuobj **pobj)
{
	struct drm_device *dev = evo->fence;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv50_display *disp = nv50_display(dev);
	u32 dmao = disp->dmao;
	u32 hash = disp->hash;
	u32 flags5;

	if (nv_device(drm->device)->chipset < 0xc0) {
		/* not supported on 0x50, specified in format mthd */
		if (nv_device(drm->device)->chipset == 0x50)
			memtype = 0;
		flags5 = 0x00010000;
	} else {
		if (memtype & 0x80000000)
			flags5 = 0x00000000; /* large pages */
		else
			flags5 = 0x00020000;
	}

	nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22));
	nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1));
	nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base));
	nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 |
					  upper_32_bits(base));
	nv_wo32(disp->ramin, dmao + 0x10, 0x00000000);
	nv_wo32(disp->ramin, dmao + 0x14, flags5);

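	/* hash-table entry: handle in the first word; channel id and
	 * object offset packed into the second */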
	nv_wo32(disp->ramin, hash + 0x00, handle);
	nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) |
					   evo->handle);

	disp->dmao += 0x20;
	disp->hash += 0x08;
	return 0;
}

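/* Create the software state for an EVO channel: a pinned and mapped VRAM
 * push buffer, plus a minimal nouveau_object whose rd32/wr32 point at the
 * accessors above (the ioremap()'d USER area pointer is stored in the
 * ofuncs->rd08 slot).
 */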
static int
nv50_evo_channel_new(struct drm_device *dev, int chid,
		     struct nouveau_channel **pevo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv50_display *disp = nv50_display(dev);
	struct nouveau_channel *evo;
	int ret;

	evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
	if (!evo)
		return -ENOMEM;
	*pevo = evo;

	evo->drm = drm;
	evo->handle = chid;
	evo->fence = dev;
	evo->user_get = 4;
	evo->user_put = 0;

	ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
			     &evo->push.buffer);
	if (ret == 0)
		ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(drm, "Error creating EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pevo);
		return ret;
	}

	ret = nouveau_bo_map(evo->push.buffer);
	if (ret) {
		NV_ERROR(drm, "Error mapping EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pevo);
		return ret;
	}

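	/* fake up enough of a nouveau_object for the accessors above */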
	evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL);
#ifdef NOUVEAU_OBJECT_MAGIC
	evo->object->_magic = NOUVEAU_OBJECT_MAGIC;
#endif
	evo->object->parent = nv_object(disp->ramin)->parent;
	evo->object->engine = nv_object(disp->ramin)->engine;
	evo->object->oclass =
		kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
	evo->object->oclass->ofuncs =
		kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
	evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
	evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
	evo->object->oclass->ofuncs->rd08 =
		ioremap(pci_resource_start(dev->pdev, 0) +
			NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
	return 0;
}

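/* Bring the hardware channel up: point it at the push buffer, hook up the
 * hash table, enable DMA, and wait for the channel to report ready before
 * priming the ring with the usual NOUVEAU_DMA_SKIPS padding nops.
 */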
static int
nv50_evo_channel_init(struct nouveau_channel *evo)
{
	struct nouveau_drm *drm = evo->drm;
	struct nouveau_device *device = nv_device(drm->device);
	int id = evo->handle, ret, i;
	u64 pushbuf = evo->push.buffer->bo.offset;
	u32 tmp;

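	/* nudge the channel out of any half-initialised state it was left
	 * in; the exact meanings of these bits are undocumented */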
	tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
	if ((tmp & 0x009f0000) == 0x00020000)
		nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);

	tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
	if ((tmp & 0x003f0000) == 0x00030000)
		nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);

	/* initialise fifo */
	nv_wr32(device, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
		NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
		NV50_PDISPLAY_EVO_DMA_CB_VALID);
	nv_wr32(device, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
	nv_wr32(device, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
	nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
		NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);

	nv_wr32(device, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
	nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
		NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
	if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
		NV_ERROR(drm, "EvoCh %d init timeout: 0x%08x\n", id,
			 nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
		return -EBUSY;
	}

	/* enable error reporting on the channel */
	nv_mask(device, 0x610028, 0x00000000, 0x00010001 << id);

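	/* software state for the ring: 4KiB push buffer, with the usual
	 * two reserved words at the end and max aligned down to 8 words */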
	evo->dma.max = (4096/4) - 2;
	evo->dma.max &= ~7;
	evo->dma.put = 0;
	evo->dma.cur = evo->dma.put;
	evo->dma.free = evo->dma.max - evo->dma.cur;

	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(evo, 0);

	return 0;
}

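/* Quiesce a channel: mask its error reporting, ack any pending interrupt,
 * disable its DMA engine, and wait for the hardware to report the channel
 * stopped.
 */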
static void
nv50_evo_channel_fini(struct nouveau_channel *evo)
{
	struct nouveau_drm *drm = evo->drm;
	struct nouveau_device *device = nv_device(drm->device);
	int id = evo->handle;

	nv_mask(device, 0x610028, 0x00010001 << id, 0x00000000);
	nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
	nv_wr32(device, NV50_PDISPLAY_INTR_0, (1 << id));
	nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
	if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
		NV_ERROR(drm, "EvoCh %d takedown timeout: 0x%08x\n", id,
			 nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
	}
}

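/* Tear down everything nv50_evo_create() built: per-CRTC semaphore buffers
 * and sync channels, the master channel, and the shared instance memory.
 */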
void
nv50_evo_destroy(struct drm_device *dev)
{
	struct nv50_display *disp = nv50_display(dev);
	int i;

	for (i = 0; i < 2; i++) {
		if (disp->crtc[i].sem.bo) {
			nouveau_bo_unmap(disp->crtc[i].sem.bo);
			nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
		}
		nv50_evo_channel_del(&disp->crtc[i].sync);
	}
	nv50_evo_channel_del(&disp->master);
	nouveau_gpuobj_ref(NULL, &disp->ramin);
}

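/* Create everything the nv50 display code needs: shared instance memory,
 * the master channel (0) used for modesetting, and a "display sync"
 * channel per CRTC (1 and 2) with the DMA objects and semaphore buffer
 * that page flipping uses.
 */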
int
nv50_evo_create(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nv50_display *disp = nv50_display(dev);
	struct nouveau_channel *evo;
	int ret, i, j;

	/* allocate instance memory for object management; every evo channel
	 * shares it, as the hardware has no per-channel object support
	 */
	ret = nouveau_gpuobj_new(drm->device, NULL, 32768, 65536,
				 NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin);
	if (ret) {
		NV_ERROR(drm, "Error allocating EVO channel memory: %d\n", ret);
		goto err;
	}

	disp->hash = 0x0000;
	disp->dmao = 0x1000;

	/* create primary evo channel, the one we use for modesetting
	 * purposes
	 */
	ret = nv50_evo_channel_new(dev, 0, &disp->master);
	if (ret)
		goto err;
	evo = disp->master;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
				  disp->ramin->addr + 0x2000, 0x1000, NULL);
	if (ret)
		goto err;

	/* create some default objects for the scanout memtypes we support */
	ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
				  0, pfb->ram.size, NULL);
	if (ret)
		goto err;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
				  0, pfb->ram.size, NULL);
	if (ret)
		goto err;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
				  (nv_device(drm->device)->chipset < 0xc0 ?
				   0x7a : 0xfe),
				  0, pfb->ram.size, NULL);
	if (ret)
		goto err;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
				  (nv_device(drm->device)->chipset < 0xc0 ?
				   0x70 : 0xfe),
				  0, pfb->ram.size, NULL);
	if (ret)
		goto err;

	/* create "display sync" channels and other structures we need
	 * to implement page flipping
	 */
	for (i = 0; i < 2; i++) {
		struct nv50_display_crtc *dispc = &disp->crtc[i];
		u64 offset;

		ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
		if (ret)
			goto err;

		ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
				     0, 0x0000, NULL, &dispc->sem.bo);
		if (!ret) {
			ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
			if (!ret)
				ret = nouveau_bo_map(dispc->sem.bo);
			if (ret)
				nouveau_bo_ref(NULL, &dispc->sem.bo);
			else
				offset = dispc->sem.bo->bo.offset;
		}

		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
					  offset, 4096, NULL);
		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
					  0, pfb->ram.size, NULL);
		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
					  (nv_device(drm->device)->chipset < 0xc0 ?
					   0x7a : 0xfe),
					  0, pfb->ram.size, NULL);
		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
					  (nv_device(drm->device)->chipset < 0xc0 ?
					   0x70 : 0xfe),
					  0, pfb->ram.size, NULL);
		if (ret)
			goto err;

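		/* seed every word of the semaphore buffer with a marker
		 * value, and start flipping from offset 0 */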
		for (j = 0; j < 4096; j += 4)
			nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
		dispc->sem.offset = 0;
	}

	return 0;

err:
	nv50_evo_destroy(dev);
	return ret;
}

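/* Bring up the master channel and both per-CRTC sync channels. */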
int
nv50_evo_init(struct drm_device *dev)
{
	struct nv50_display *disp = nv50_display(dev);
	int ret, i;

	ret = nv50_evo_channel_init(disp->master);
	if (ret)
		return ret;

	for (i = 0; i < 2; i++) {
		ret = nv50_evo_channel_init(disp->crtc[i].sync);
		if (ret)
			return ret;
	}

	return 0;
}

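/* Stop any channels that exist; safe on a partially-constructed display. */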
void
nv50_evo_fini(struct drm_device *dev)
{
	struct nv50_display *disp = nv50_display(dev);
	int i;

	for (i = 0; i < 2; i++) {
		if (disp->crtc[i].sync)
			nv50_evo_channel_fini(disp->crtc[i].sync);
	}

	if (disp->master)
		nv50_evo_channel_fini(disp->master);
}