/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"

#include <core/client.h>
#include <core/engctx.h>
#include <core/handle.h>
#include <core/ramht.h>
#include <subdev/instmem.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

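/* One entry per RAMFC field and the PFIFO register it shadows.  Judging by
 * how nv04_fifo_chan_fini() uses them, the columns are { field width in
 * bits, shift within the RAMFC word, byte offset into the channel's RAMFC
 * entry, shift within the register, register address }.
 */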
static struct ramfc_desc
nv04_ramfc[] = {
        { 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
        { 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
        { 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
        { 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
        { 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
        { 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
        { 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
        { 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
        {}
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

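/* Enter an object into the channel's RAMHT hash table, so PFIFO can
 * translate handle -> instance on object binds.  The context word packs
 * the object's instance address (>>4) in the low bits, an engine ID in
 * bits 16-17, the channel ID at bit 24 and a valid bit at bit 31.
 */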
int
nv04_fifo_object_attach(struct nvkm_object *parent,
                        struct nvkm_object *object, u32 handle)
{
        struct nv04_fifo *fifo = (void *)parent->engine;
        struct nv04_fifo_chan *chan = (void *)parent;
        u32 context, chid = chan->base.chid;
        int ret;

        if (nv_iclass(object, NV_GPUOBJ_CLASS))
                context = nv_gpuobj(object)->addr >> 4;
        else
                context = 0x00000004; /* just non-zero */

        switch (nv_engidx(object->engine)) {
        case NVDEV_ENGINE_DMAOBJ:
        case NVDEV_ENGINE_SW:
                context |= 0x00000000;
                break;
        case NVDEV_ENGINE_GR:
                context |= 0x00010000;
                break;
        case NVDEV_ENGINE_MPEG:
                context |= 0x00020000;
                break;
        default:
                return -EINVAL;
        }

        context |= 0x80000000; /* valid */
        context |= chid << 24;

        mutex_lock(&nv_subdev(fifo)->mutex);
        ret = nvkm_ramht_insert(fifo->ramht, chid, handle, context);
        mutex_unlock(&nv_subdev(fifo)->mutex);
        return ret;
}

void
nv04_fifo_object_detach(struct nvkm_object *parent, int cookie)
{
        struct nv04_fifo *fifo = (void *)parent->engine;
        mutex_lock(&nv_subdev(fifo)->mutex);
        nvkm_ramht_remove(fifo->ramht, cookie);
        mutex_unlock(&nv_subdev(fifo)->mutex);
}

int
nv04_fifo_context_attach(struct nvkm_object *parent,
                         struct nvkm_object *object)
{
        nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid;
        return 0;
}

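/* nvif_unpack() is a macro that assigns to the local 'ret' as a side
 * effect, which is why the else branch below can return 'ret' without a
 * visible prior assignment.
 */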
static int
nv04_fifo_chan_ctor(struct nvkm_object *parent,
                    struct nvkm_object *engine,
                    struct nvkm_oclass *oclass, void *data, u32 size,
                    struct nvkm_object **pobject)
{
        union {
                struct nv03_channel_dma_v0 v0;
        } *args = data;
        struct nv04_fifo *fifo = (void *)engine;
        struct nv04_fifo_chan *chan;
        int ret;

        nvif_ioctl(parent, "create channel dma size %d\n", size);
        if (nvif_unpack(args->v0, 0, 0, false)) {
                nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
                                   "offset %08x\n", args->v0.version,
                           args->v0.pushbuf, args->v0.offset);
        } else
                return ret;

        ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
                                       0x10000, args->v0.pushbuf,
                                       (1ULL << NVDEV_ENGINE_DMAOBJ) |
                                       (1ULL << NVDEV_ENGINE_SW) |
                                       (1ULL << NVDEV_ENGINE_GR), &chan);
        *pobject = nv_object(chan);
        if (ret)
                return ret;

        args->v0.chid = chan->base.chid;

        nv_parent(chan)->object_attach = nv04_fifo_object_attach;
        nv_parent(chan)->object_detach = nv04_fifo_object_detach;
        nv_parent(chan)->context_attach = nv04_fifo_context_attach;
        chan->ramfc = chan->base.chid * 32;

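        /* Seed this channel's RAMFC entry: DMA_PUT/GET (0x00/0x04) start at
         * the requested push buffer offset, 0x08 holds the push buffer's
         * instance address >> 4, and 0x10 holds the DMA fetch parameters.
         */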
        nvkm_kmap(fifo->ramfc);
        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x10,
                  NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
                  NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
                  NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
                  NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
        nvkm_done(fifo->ramfc);
        return 0;
}

void
nv04_fifo_chan_dtor(struct nvkm_object *object)
{
        struct nv04_fifo *fifo = (void *)object->engine;
        struct nv04_fifo_chan *chan = (void *)object;
        struct ramfc_desc *c = fifo->ramfc_desc;

        nvkm_kmap(fifo->ramfc);
        do {
                nvkm_wo32(fifo->ramfc, chan->ramfc + c->ctxp, 0x00000000);
        } while ((++c)->bits);
        nvkm_done(fifo->ramfc);

        nvkm_fifo_channel_destroy(&chan->base);
}

int
nv04_fifo_chan_init(struct nvkm_object *object)
{
        struct nv04_fifo *fifo = (void *)object->engine;
        struct nv04_fifo_chan *chan = (void *)object;
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        u32 mask = 1 << chan->base.chid;
        unsigned long flags;
        int ret;

        ret = nvkm_fifo_channel_init(&chan->base);
        if (ret)
                return ret;

        spin_lock_irqsave(&fifo->base.lock, flags);
        nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
        spin_unlock_irqrestore(&fifo->base.lock, flags);
        return 0;
}

int
nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
        struct nv04_fifo *fifo = (void *)object->engine;
        struct nv04_fifo_chan *chan = (void *)object;
        struct nvkm_gpuobj *fctx = fifo->ramfc;
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct ramfc_desc *c;
        unsigned long flags;
        u32 data = chan->ramfc;
        u32 chid;

        /* prevent fifo context switches */
        spin_lock_irqsave(&fifo->base.lock, flags);
        nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

        /* if this channel is active, replace it with a null context */
        chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
        if (chid == chan->base.chid) {
                nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
                nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
                nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

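                /* save the current register state of each RAMFC field into
                 * the channel's RAMFC image, then zero the registers so a
                 * null context is left loaded.
                 */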
                c = fifo->ramfc_desc;
                do {
                        u32 rm = ((1ULL << c->bits) - 1) << c->regs;
                        u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
                        u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
                        u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
                        nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
                } while ((++c)->bits);

                c = fifo->ramfc_desc;
                do {
                        nvkm_wr32(device, c->regp, 0x00000000);
                } while ((++c)->bits);

                nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
                nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
                nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);
                nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
                nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
        }

        /* restore normal operation, after disabling dma mode */
        nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
        nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
        spin_unlock_irqrestore(&fifo->base.lock, flags);

        return nvkm_fifo_channel_fini(&chan->base, suspend);
}

static struct nvkm_ofuncs
nv04_fifo_ofuncs = {
        .ctor = nv04_fifo_chan_ctor,
        .dtor = nv04_fifo_chan_dtor,
        .init = nv04_fifo_chan_init,
        .fini = nv04_fifo_chan_fini,
        .map = _nvkm_fifo_channel_map,
        .rd32 = _nvkm_fifo_channel_rd32,
        .wr32 = _nvkm_fifo_channel_wr32,
        .ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
nv04_fifo_sclass[] = {
        { NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
        {}
};

/*******************************************************************************
 * FIFO context - basically just the instmem reserved for the channel
 ******************************************************************************/

int
nv04_fifo_context_ctor(struct nvkm_object *parent,
                       struct nvkm_object *engine,
                       struct nvkm_oclass *oclass, void *data, u32 size,
                       struct nvkm_object **pobject)
{
        struct nv04_fifo_base *base;
        int ret;

        ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
                                       0x1000, NVOBJ_FLAG_HEAP, &base);
        *pobject = nv_object(base);
        if (ret)
                return ret;

        return 0;
}

static struct nvkm_oclass
nv04_fifo_cclass = {
        .handle = NV_ENGCTX(FIFO, 0x04),
        .ofuncs = &(struct nvkm_ofuncs) {
                .ctor = nv04_fifo_context_ctor,
                .dtor = _nvkm_fifo_context_dtor,
                .init = _nvkm_fifo_context_init,
                .fini = _nvkm_fifo_context_fini,
                .rd32 = _nvkm_fifo_context_rd32,
                .wr32 = _nvkm_fifo_context_wr32,
        },
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

void
nv04_fifo_pause(struct nvkm_fifo *obj, unsigned long *pflags)
__acquires(fifo->base.lock)
{
        struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        unsigned long flags;

        spin_lock_irqsave(&fifo->base.lock, flags);
        *pflags = flags;

        nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
        nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

        /* in some cases the puller may be left in an inconsistent state
         * if you try to stop it while it's busy translating handles.
         * sometimes you get a CACHE_ERROR, sometimes it just fails
         * silently; sending incorrect instance offsets to PGRAPH after
         * it's started up again.
         *
         * to avoid this, we invalidate the most recently calculated
         * instance.
         */
        nvkm_msec(device, 2000,
                u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
                if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
                        break;
        );

        if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
                      NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
                nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

        nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}

void
nv04_fifo_start(struct nvkm_fifo *obj, unsigned long *pflags)
__releases(fifo->base.lock)
{
        struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        unsigned long flags = *pflags;

        nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
        nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);

        spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static const char *
nv_dma_state_err(u32 state)
{
        static const char * const desc[] = {
                "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
                "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
        };
        return desc[(state >> 29) & 0x7];
}

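/* Attempt to execute a software method.  Method 0x0000 binds an object to
 * a subchannel; if it's a software object, the subchannel's engine bits
 * are cleared so PFIFO traps further methods for it.  Any other method is
 * forwarded to the previously-bound software object via nv_call().
 */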
static bool
nv04_fifo_swmthd(struct nv04_fifo *fifo, u32 chid, u32 addr, u32 data)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct nv04_fifo_chan *chan = NULL;
        struct nvkm_handle *bind;
        const int subc = (addr >> 13) & 0x7;
        const int mthd = addr & 0x1ffc;
        bool handled = false;
        unsigned long flags;
        u32 engine;

        spin_lock_irqsave(&fifo->base.lock, flags);
        if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
                chan = (void *)fifo->base.channel[chid];
        if (unlikely(!chan))
                goto out;

        switch (mthd) {
        case 0x0000:
                bind = nvkm_namedb_get(nv_namedb(chan), data);
                if (unlikely(!bind))
                        break;

                if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
                        engine = 0x0000000f << (subc * 4);
                        chan->subc[subc] = data;
                        handled = true;

                        nvkm_mask(device, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
                }

                nvkm_namedb_put(bind);
                break;
        default:
                engine = nvkm_rd32(device, NV04_PFIFO_CACHE1_ENGINE);
                if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
                        break;

                bind = nvkm_namedb_get(nv_namedb(chan), chan->subc[subc]);
                if (likely(bind)) {
                        if (!nv_call(bind->object, mthd, data))
                                handled = true;
                        nvkm_namedb_put(bind);
                }
                break;
        }

out:
        spin_unlock_irqrestore(&fifo->base.lock, flags);
        return handled;
}

static void
nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 mthd, data;
        int ptr;

        /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
         * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
         * show that it wraps around to the start at GET=0x800.. No clue as to
         * why..
         */
        ptr = (get & 0x7ff) >> 2;

        if (device->card_type < NV_40) {
                mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
                data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
        } else {
                mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
                data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
        }

        if (!nv04_fifo_swmthd(fifo, chid, mthd, data)) {
                const char *client_name =
                        nvkm_client_name_for_fifo_chid(&fifo->base, chid);
                nvkm_error(subdev, "CACHE_ERROR - "
                           "ch %d [%s] subc %d mthd %04x data %08x\n",
                           chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
                           data);
        }

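        /* recover: stop the pusher, ack the interrupt, step GET past the
         * offending entry while pushing is disabled, then re-enable the
         * pusher and puller.
         */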
        nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
        nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

        nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
                  nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
        nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
        nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
                  nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
        nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);

        nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
                  nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
        nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}

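/* DMA_PUSHER error: report the pusher state, then recover by forcing GET
 * (and the IB pointers on NV50) to catch up to PUT, skipping the data the
 * pusher choked on, before clearing the error state and acking the
 * interrupt.
 */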
static void
nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 dma_get = nvkm_rd32(device, 0x003244);
        u32 dma_put = nvkm_rd32(device, 0x003240);
        u32 push = nvkm_rd32(device, 0x003220);
        u32 state = nvkm_rd32(device, 0x003228);
        const char *client_name;

        client_name = nvkm_client_name_for_fifo_chid(&fifo->base, chid);

        if (device->card_type == NV_50) {
                u32 ho_get = nvkm_rd32(device, 0x003328);
                u32 ho_put = nvkm_rd32(device, 0x003320);
                u32 ib_get = nvkm_rd32(device, 0x003334);
                u32 ib_put = nvkm_rd32(device, 0x003330);

                nvkm_error(subdev, "DMA_PUSHER - "
                           "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
                           "ib_put %08x state %08x (err: %s) push %08x\n",
                           chid, client_name, ho_get, dma_get, ho_put, dma_put,
                           ib_get, ib_put, state, nv_dma_state_err(state),
                           push);

                /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
                nvkm_wr32(device, 0x003364, 0x00000000);
                if (dma_get != dma_put || ho_get != ho_put) {
                        nvkm_wr32(device, 0x003244, dma_put);
                        nvkm_wr32(device, 0x003328, ho_put);
                } else
                if (ib_get != ib_put)
                        nvkm_wr32(device, 0x003334, ib_put);
        } else {
                nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
                           "state %08x (err: %s) push %08x\n",
                           chid, client_name, dma_get, dma_put, state,
                           nv_dma_state_err(state), push);

                if (dma_get != dma_put)
                        nvkm_wr32(device, 0x003244, dma_put);
        }

        nvkm_wr32(device, 0x003228, 0x00000000);
        nvkm_wr32(device, 0x003220, 0x00000001);
        nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}

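/* Top-level PFIFO interrupt handler.  Context switching is disabled while
 * pending sources are serviced; any source that can't be handled has its
 * interrupt enable masked off to avoid an interrupt storm.
 */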
void
nv04_fifo_intr(struct nvkm_subdev *subdev)
{
        struct nvkm_device *device = subdev->device;
        struct nv04_fifo *fifo = (void *)subdev;
        u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
        u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
        u32 reassign, chid, get, sem;

        reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
        nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

        chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
        get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);

        if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
                nv04_fifo_cache_error(fifo, chid, get);
                stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
        }

        if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
                nv04_fifo_dma_pusher(fifo, chid);
                stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
        }

        if (stat & NV_PFIFO_INTR_SEMAPHORE) {
                stat &= ~NV_PFIFO_INTR_SEMAPHORE;
                nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);

                sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
                nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

                nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
                nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
        }

        if (device->card_type == NV_50) {
                if (stat & 0x00000010) {
                        stat &= ~0x00000010;
                        nvkm_wr32(device, 0x002100, 0x00000010);
                }

                if (stat & 0x40000000) {
                        nvkm_wr32(device, 0x002100, 0x40000000);
                        nvkm_fifo_uevent(&fifo->base);
                        stat &= ~0x40000000;
                }
        }

        if (stat) {
                nvkm_warn(subdev, "intr %08x\n", stat);
                nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
                nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
        }

        nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
}

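/* NV04 PFIFO exposes channels 0-15.  RAMHT, RAMRO and RAMFC are fixed
 * allocations owned by the instmem subdev, so references are taken here
 * rather than new objects being created.
 */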
static int
nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
               struct nvkm_oclass *oclass, void *data, u32 size,
               struct nvkm_object **pobject)
{
        struct nvkm_device *device = (void *)parent;
        struct nvkm_instmem *imem = device->imem;
        struct nv04_fifo *fifo;
        int ret;

        ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &fifo);
        *pobject = nv_object(fifo);
        if (ret)
                return ret;

        nvkm_ramht_ref(imem->ramht, &fifo->ramht);
        nvkm_gpuobj_ref(imem->ramro, &fifo->ramro);
        nvkm_gpuobj_ref(imem->ramfc, &fifo->ramfc);

        nv_subdev(fifo)->unit = 0x00000100;
        nv_subdev(fifo)->intr = nv04_fifo_intr;
        nv_engine(fifo)->cclass = &nv04_fifo_cclass;
        nv_engine(fifo)->sclass = nv04_fifo_sclass;
        fifo->base.pause = nv04_fifo_pause;
        fifo->base.start = nv04_fifo_start;
        fifo->ramfc_desc = nv04_ramfc;
        return 0;
}

void
nv04_fifo_dtor(struct nvkm_object *object)
{
        struct nv04_fifo *fifo = (void *)object;
        nvkm_gpuobj_ref(NULL, &fifo->ramfc);
        nvkm_gpuobj_ref(NULL, &fifo->ramro);
        nvkm_ramht_ref(NULL, &fifo->ramht);
        nvkm_fifo_destroy(&fifo->base);
}

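/* Hardware init: point PFIFO at RAMHT/RAMRO/RAMFC.  The NV03_PFIFO_RAMHT
 * word appears to encode the search stride in bits 24+ (0x03 == 128), the
 * table size as ramht->bits - 9 (log2 of the entry count, relative to the
 * 512-entry minimum) in bits 16+, and the table's instance address >> 8
 * in the low bits.
 */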
int
nv04_fifo_init(struct nvkm_object *object)
{
        struct nv04_fifo *fifo = (void *)object;
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        int ret;

        ret = nvkm_fifo_init(&fifo->base);
        if (ret)
                return ret;

        nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
        nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

        nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
                                            ((fifo->ramht->bits - 9) << 16) |
                                            (fifo->ramht->gpuobj.addr >> 8));
        nvkm_wr32(device, NV03_PFIFO_RAMRO, fifo->ramro->addr >> 8);
        nvkm_wr32(device, NV03_PFIFO_RAMFC, fifo->ramfc->addr >> 8);

        nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);

        nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
        nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);

        nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
        nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
        nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
        return 0;
}

struct nvkm_oclass *
nv04_fifo_oclass = &(struct nvkm_oclass) {
        .handle = NV_ENGINE(FIFO, 0x04),
        .ofuncs = &(struct nvkm_ofuncs) {
                .ctor = nv04_fifo_ctor,
                .dtor = nv04_fifo_dtor,
                .init = nv04_fifo_init,
                .fini = _nvkm_fifo_fini,
        },
};