drivers/gpu/drm/nouveau/nve0_fifo.c

/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <drm/drmP.h>

#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nouveau_fifo.h"

#define NVE0_FIFO_ENGINE_NUM 32

static void nve0_fifo_isr(struct drm_device *);

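/*
 * Per-engine runlist state: two playlist buffers are kept and used
 * alternately each time the set of active channels is rewritten.
 */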
struct nve0_fifo_engine {
        struct nouveau_gpuobj *playlist[2];
        int cur_playlist;
};

struct nve0_fifo_priv {
        struct nouveau_fifo_priv base;
        struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM];
        struct {
                struct nouveau_gpuobj *mem;
                struct nouveau_vma bar;
        } user;
        int spoon_nr;
};

struct nve0_fifo_chan {
        struct nouveau_fifo_chan base;
        u32 engine;
};

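/*
 * Rebuild the playlist for @engine: scan each channel's control register,
 * write the ids of channels bound to this engine into the inactive playlist
 * buffer, then point PFIFO at the new list and wait for it to be accepted.
 */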
static void
nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
        struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
        struct nve0_fifo_engine *peng = &priv->engine[engine];
        struct nouveau_gpuobj *cur;
        u32 match = (engine << 16) | 0x00000001;
        int ret, i, p;

        cur = peng->playlist[peng->cur_playlist];
        if (unlikely(cur == NULL)) {
                ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 0x1000, 0, &cur);
                if (ret) {
                        NV_ERROR(dev, "PFIFO: playlist alloc failed\n");
                        return;
                }

                peng->playlist[peng->cur_playlist] = cur;
        }

        peng->cur_playlist = !peng->cur_playlist;

        for (i = 0, p = 0; i < priv->base.channels; i++) {
                u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001;
                if (ctrl != match)
                        continue;
                nv_wo32(cur, p + 0, i);
                nv_wo32(cur, p + 4, 0x00000000);
                p += 8;
        }
        pinstmem->flush(dev);

        nv_wr32(dev, 0x002270, cur->vinst >> 12);
        nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
        if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
                NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
}

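/*
 * Set up the FIFO state for a new channel: map its USER control page via
 * BAR1, fill in the channel's instance block (usermem address, indirect
 * buffer base/size and assorted hardware defaults), then enable the channel
 * and add it to the engine's playlist.
 */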
static int
nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
        struct nve0_fifo_priv *priv = nv_engine(dev, engine);
        struct nve0_fifo_chan *fctx;
        u64 usermem = priv->user.mem->vinst + chan->id * 512;
        u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
        int ret = 0, i;

        fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
        if (!fctx)
                return -ENOMEM;

        fctx->engine = 0; /* PGRAPH */

        /* allocate vram for control regs, map into polling area */
        chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
                                priv->user.bar.offset + (chan->id * 512), 512);
        if (!chan->user) {
                ret = -ENOMEM;
                goto error;
        }

        for (i = 0; i < 0x100; i += 4)
                nv_wo32(chan->ramin, i, 0x00000000);
        nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem));
        nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem));
        nv_wo32(chan->ramin, 0x10, 0x0000face);
        nv_wo32(chan->ramin, 0x30, 0xfffff902);
        nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
        nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
                                   upper_32_bits(ib_virt));
        nv_wo32(chan->ramin, 0x84, 0x20400000);
        nv_wo32(chan->ramin, 0x94, 0x30000001);
        nv_wo32(chan->ramin, 0x9c, 0x00000100);
        nv_wo32(chan->ramin, 0xac, 0x0000001f);
        nv_wo32(chan->ramin, 0xe4, 0x00000000);
        nv_wo32(chan->ramin, 0xe8, chan->id);
        nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
        nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
        pinstmem->flush(dev);

        nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
                                                (chan->ramin->vinst >> 12));
        nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
        nve0_fifo_playlist_update(dev, fctx->engine);
        nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);

error:
        if (ret)
                priv->base.base.context_del(chan, engine);
        return ret;
}

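/*
 * Tear down a channel's FIFO state: disable the channel, kick it off the
 * hardware, drop it from the playlist, unmap its USER page and free the
 * per-channel context.
 */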
static void
nve0_fifo_context_del(struct nouveau_channel *chan, int engine)
{
        struct nve0_fifo_chan *fctx = chan->engctx[engine];
        struct drm_device *dev = chan->dev;

        nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
        nv_wr32(dev, 0x002634, chan->id);
        if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
                NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
        nve0_fifo_playlist_update(dev, fctx->engine);
        nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000);

        if (chan->user) {
                iounmap(chan->user);
                chan->user = NULL;
        }

        chan->engctx[NVOBJ_ENGINE_FIFO] = NULL;
        kfree(fctx);
}

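/*
 * Bring PFIFO up: reset the unit, enable every available PSUBFIFO, clear
 * and unmask interrupts, point the hardware at the USER area in BAR1 and
 * re-register any channels that already have a FIFO context.
 */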
static int
nve0_fifo_init(struct drm_device *dev, int engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nve0_fifo_priv *priv = nv_engine(dev, engine);
        struct nve0_fifo_chan *fctx;
        int i;

        /* reset PFIFO, enable all available PSUBFIFO areas */
        nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
        nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
        nv_wr32(dev, 0x000204, 0xffffffff);

        priv->spoon_nr = hweight32(nv_rd32(dev, 0x000204));
        NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);

        /* PSUBFIFO[n] */
        for (i = 0; i < priv->spoon_nr; i++) {
                nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
                nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
                nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
        }

        nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);

        nv_wr32(dev, 0x002a00, 0xffffffff);
        nv_wr32(dev, 0x002100, 0xffffffff);
        nv_wr32(dev, 0x002140, 0xbfffffff);

        /* restore PFIFO context table */
        for (i = 0; i < priv->base.channels; i++) {
                struct nouveau_channel *chan = dev_priv->channels.ptr[i];
                if (!chan || !(fctx = chan->engctx[engine]))
                        continue;

                nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
                                                 (chan->ramin->vinst >> 12));
                nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
                nve0_fifo_playlist_update(dev, fctx->engine);
                nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
        }

        return 0;
}

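/*
 * Quiesce PFIFO for suspend/unload: kick every still-active channel off
 * the hardware, then mask all PFIFO interrupts.
 */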
static int
nve0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
{
        struct nve0_fifo_priv *priv = nv_engine(dev, engine);
        int i;

        for (i = 0; i < priv->base.channels; i++) {
                if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1))
                        continue;

                nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800);
                nv_wr32(dev, 0x002634, i);
                if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
                        NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
                                i, nv_rd32(dev, 0x002634));
                        return -EBUSY;
                }
        }

        nv_wr32(dev, 0x002140, 0x00000000);
        return 0;
}

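/*
 * Decode tables for fault/interrupt reporting.  The unit and client tables
 * are still empty for NVE0; only the fault reasons and a couple of subfifo
 * interrupt bits are named so far.
 */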
struct nouveau_enum nve0_fifo_fault_unit[] = {
        {}
};

struct nouveau_enum nve0_fifo_fault_reason[] = {
        { 0x00, "PT_NOT_PRESENT" },
        { 0x01, "PT_TOO_SHORT" },
        { 0x02, "PAGE_NOT_PRESENT" },
        { 0x03, "VM_LIMIT_EXCEEDED" },
        { 0x04, "NO_CHANNEL" },
        { 0x05, "PAGE_SYSTEM_ONLY" },
        { 0x06, "PAGE_READ_ONLY" },
        { 0x0a, "COMPRESSED_SYSRAM" },
        { 0x0c, "INVALID_STORAGE_TYPE" },
        {}
};

struct nouveau_enum nve0_fifo_fault_hubclient[] = {
        {}
};

struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
        {}
};

struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
        { 0x00200000, "ILLEGAL_MTHD" },
        { 0x00800000, "EMPTY_SUBC" },
        {}
};

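/*
 * Decode and log a VM fault from fault unit @unit: faulting address,
 * read/write direction, reason, reporting unit/client, and the instance
 * address of the offending channel.
 */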
static void
nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
{
        u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
        u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
        u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
        u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
        u32 client = (stat & 0x00001f00) >> 8;

        NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
                (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
        nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
        printk("] from ");
        nouveau_enum_print(nve0_fifo_fault_unit, unit);
        if (stat & 0x00000040) {
                printk("/");
                nouveau_enum_print(nve0_fifo_fault_hubclient, client);
        } else {
                printk("/GPC%d/", (stat & 0x1f000000) >> 24);
                nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
        }
        printk(" on channel 0x%010llx\n", (u64)inst << 12);
}

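/*
 * Complete a pending page flip on behalf of channel @chid, if that channel
 * still exists; used from the subfifo interrupt handler below.
 */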
static int
nve0_fifo_page_flip(struct drm_device *dev, u32 chid)
{
        struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = NULL;
        unsigned long flags;
        int ret = -EINVAL;

        spin_lock_irqsave(&dev_priv->channels.lock, flags);
        if (likely(chid >= 0 && chid < priv->base.channels)) {
                chan = dev_priv->channels.ptr[chid];
                if (likely(chan))
                        ret = nouveau_finish_page_flip(chan, NULL);
        }
        spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
        return ret;
}

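/*
 * Handle a PSUBFIFO interrupt.  An ILLEGAL_MTHD on method 0x0054 is handled
 * in software as a page-flip completion; whatever remains is logged, and
 * the interrupt is then acknowledged.
 */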
static void
nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
{
        u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
        u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
        u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
        u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
        u32 subc = (addr & 0x00070000);
        u32 mthd = (addr & 0x00003ffc);
        u32 show = stat;

        if (stat & 0x00200000) {
                if (mthd == 0x0054) {
                        if (!nve0_fifo_page_flip(dev, chid))
                                show &= ~0x00200000;
                }
        }

        if (show) {
                NV_INFO(dev, "PFIFO%d:", unit);
                nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
                NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
                        unit, chid, subc, mthd, data);
        }

        nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
        nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
}

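/*
 * Top-level PFIFO interrupt handler: dispatch VM faults and PSUBFIFO
 * interrupts to the helpers above, acknowledge what was handled, and mask
 * off any status bits we don't understand.
 */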
static void
nve0_fifo_isr(struct drm_device *dev)
{
        u32 mask = nv_rd32(dev, 0x002140);
        u32 stat = nv_rd32(dev, 0x002100) & mask;

        if (stat & 0x00000100) {
                NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
                nv_wr32(dev, 0x002100, 0x00000100);
                stat &= ~0x00000100;
        }

        if (stat & 0x10000000) {
                u32 units = nv_rd32(dev, 0x00259c);
                u32 u = units;

                while (u) {
                        int i = ffs(u) - 1;
                        nve0_fifo_isr_vm_fault(dev, i);
                        u &= ~(1 << i);
                }

                nv_wr32(dev, 0x00259c, units);
                stat &= ~0x10000000;
        }

        if (stat & 0x20000000) {
                u32 units = nv_rd32(dev, 0x0025a0);
                u32 u = units;

                while (u) {
                        int i = ffs(u) - 1;
                        nve0_fifo_isr_subfifo_intr(dev, i);
                        u &= ~(1 << i);
                }

                nv_wr32(dev, 0x0025a0, units);
                stat &= ~0x20000000;
        }

        if (stat & 0x40000000) {
                NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
                nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
                stat &= ~0x40000000;
        }

        if (stat) {
                NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
                nv_wr32(dev, 0x002100, stat);
                nv_wr32(dev, 0x002140, 0);
        }
}

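/*
 * Undo nve0_fifo_create(): release the BAR1 mapping and backing object for
 * the USER area and drop both playlist buffers of every engine.
 */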
static void
nve0_fifo_destroy(struct drm_device *dev, int engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nve0_fifo_priv *priv = nv_engine(dev, engine);
        int i;

        nouveau_vm_put(&priv->user.bar);
        nouveau_gpuobj_ref(NULL, &priv->user.mem);

        for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) {
                nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
                nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
        }

        dev_priv->eng[engine] = NULL;
        kfree(priv);
}

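/*
 * Construct the NVE0 FIFO engine: register the engine callbacks, allocate
 * a 512-byte USER control area for each of the 4096 channels, map it
 * through BAR1, and hook up the PFIFO interrupt handler (IRQ source 8).
 */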
int
nve0_fifo_create(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nve0_fifo_priv *priv;
        int ret;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->base.base.destroy = nve0_fifo_destroy;
        priv->base.base.init = nve0_fifo_init;
        priv->base.base.fini = nve0_fifo_fini;
        priv->base.base.context_new = nve0_fifo_context_new;
        priv->base.base.context_del = nve0_fifo_context_del;
        priv->base.channels = 4096;
        dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;

        ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 512, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
        if (ret)
                goto error;

        ret = nouveau_vm_get(dev_priv->bar1_vm, priv->user.mem->size,
                             12, NV_MEM_ACCESS_RW, &priv->user.bar);
        if (ret)
                goto error;

        nouveau_vm_map(&priv->user.bar, *(struct nouveau_mem **)priv->user.mem->node);

        nouveau_irq_register(dev, 8, nve0_fifo_isr);
error:
        if (ret)
                priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
        return ret;
}