/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
05c7145d | 24 | #include "gk104.h" |
5132f377 | 25 | |
ebb945a9 | 26 | #include <core/client.h> |
ebb945a9 | 27 | #include <core/engctx.h> |
ebb945a9 | 28 | #include <core/enum.h> |
05c7145d | 29 | #include <core/handle.h> |
52225551 | 30 | #include <subdev/fb.h> |
5ce3bf3c | 31 | #include <subdev/mmu.h> |
05c7145d | 32 | #include <subdev/timer.h> |
5132f377 | 33 | |
05c7145d | 34 | #include <nvif/class.h> |
f58ddf95 | 35 | #include <nvif/ioctl.h> |
05c7145d | 36 | #include <nvif/unpack.h> |
5132f377 | 37 | |
507ceb15 | 38 | #define _(a,b) { (a), ((1ULL << (a)) | (b)) } |
dbff2dee | 39 | static const struct { |
507ceb15 MP |
40 | u64 subdev; |
41 | u64 mask; | |
dbff2dee | 42 | } fifo_engine[] = { |
48506d17 | 43 | _(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW) | |
aedf24ff | 44 | (1ULL << NVDEV_ENGINE_CE2)), |
37a5d028 | 45 | _(NVDEV_ENGINE_MSPDEC , 0), |
fd8666f7 | 46 | _(NVDEV_ENGINE_MSPPP , 0), |
eccf7e8a | 47 | _(NVDEV_ENGINE_MSVLD , 0), |
aedf24ff BS |
48 | _(NVDEV_ENGINE_CE0 , 0), |
49 | _(NVDEV_ENGINE_CE1 , 0), | |
bd8369ec | 50 | _(NVDEV_ENGINE_MSENC , 0), |
dbff2dee BS |
51 | }; |
52 | #undef _ | |
53 | #define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine) | |
54 | ||
05c7145d BS |
55 | struct gk104_fifo_engn { |
56 | struct nvkm_gpuobj *runlist[2]; | |
f82c44a7 | 57 | int cur_runlist; |
138b873f | 58 | wait_queue_head_t wait; |
5132f377 BS |
59 | }; |
60 | ||
6189f1b0 | 61 | struct gk104_fifo { |
05c7145d | 62 | struct nvkm_fifo base; |
98d1e317 BS |
63 | |
64 | struct work_struct fault; | |
65 | u64 mask; | |
66 | ||
05c7145d | 67 | struct gk104_fifo_engn engine[FIFO_ENGINE_NR]; |
5132f377 | 68 | struct { |
05c7145d BS |
69 | struct nvkm_gpuobj *mem; |
70 | struct nvkm_vma bar; | |
5132f377 BS |
71 | } user; |
72 | int spoon_nr; | |
73 | }; | |
74 | ||
05c7145d BS |
75 | struct gk104_fifo_base { |
76 | struct nvkm_fifo_base base; | |
77 | struct nvkm_gpuobj *pgd; | |
78 | struct nvkm_vm *vm; | |
ebb945a9 BS |
79 | }; |
80 | ||
05c7145d BS |
81 | struct gk104_fifo_chan { |
82 | struct nvkm_fifo_chan base; | |
5132f377 | 83 | u32 engine; |
87032e11 BS |
84 | enum { |
85 | STOPPED, | |
86 | RUNNING, | |
87 | KILLED | |
88 | } state; | |
5132f377 BS |
89 | }; |
90 | ||
ebb945a9 BS |
91 | /******************************************************************************* |
92 | * FIFO channel objects | |
93 | ******************************************************************************/ | |
94 | ||
5132f377 | 95 | static void |
6189f1b0 | 96 | gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine) |
5132f377 | 97 | { |
6189f1b0 | 98 | struct gk104_fifo_engn *engn = &fifo->engine[engine]; |
e5c5e4f5 BS |
99 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; |
100 | struct nvkm_device *device = subdev->device; | |
05c7145d | 101 | struct nvkm_gpuobj *cur; |
ebb945a9 | 102 | int i, p; |
5132f377 | 103 | |
6189f1b0 | 104 | mutex_lock(&nv_subdev(fifo)->mutex); |
f82c44a7 BS |
105 | cur = engn->runlist[engn->cur_runlist]; |
106 | engn->cur_runlist = !engn->cur_runlist; | |
5132f377 | 107 | |
5444e770 | 108 | nvkm_kmap(cur); |
6189f1b0 BS |
109 | for (i = 0, p = 0; i < fifo->base.max; i++) { |
110 | struct gk104_fifo_chan *chan = (void *)fifo->base.channel[i]; | |
87032e11 | 111 | if (chan && chan->state == RUNNING && chan->engine == engine) { |
5444e770 BS |
112 | nvkm_wo32(cur, p + 0, i); |
113 | nvkm_wo32(cur, p + 4, 0x00000000); | |
87032e11 BS |
114 | p += 8; |
115 | } | |
5132f377 | 116 | } |
5444e770 | 117 | nvkm_done(cur); |
5132f377 | 118 | |
87744403 BS |
119 | nvkm_wr32(device, 0x002270, cur->addr >> 12); |
120 | nvkm_wr32(device, 0x002274, (engine << 20) | (p >> 3)); | |
87032e11 | 121 | |
87744403 | 122 | if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 + |
5c0633e6 BS |
123 | (engine * 0x08)) & 0x00100000), |
124 | msecs_to_jiffies(2000)) == 0) | |
e5c5e4f5 | 125 | nvkm_error(subdev, "runlist %d update timeout\n", engine); |
6189f1b0 | 126 | mutex_unlock(&nv_subdev(fifo)->mutex); |
5132f377 BS |
127 | } |
128 | ||
c420b2dc | 129 | static int |
05c7145d BS |
130 | gk104_fifo_context_attach(struct nvkm_object *parent, |
131 | struct nvkm_object *object) | |
5132f377 | 132 | { |
05c7145d | 133 | struct gk104_fifo_base *base = (void *)parent->parent; |
5444e770 | 134 | struct nvkm_gpuobj *engn = &base->base.gpuobj; |
05c7145d | 135 | struct nvkm_engctx *ectx = (void *)object; |
ebb945a9 BS |
136 | u32 addr; |
137 | int ret; | |
138 | ||
139 | switch (nv_engidx(object->engine)) { | |
01672ef4 | 140 | case NVDEV_ENGINE_SW : |
448a4532 | 141 | return 0; |
aedf24ff BS |
142 | case NVDEV_ENGINE_CE0: |
143 | case NVDEV_ENGINE_CE1: | |
144 | case NVDEV_ENGINE_CE2: | |
448a4532 | 145 | nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12; |
01672ef4 | 146 | return 0; |
37a5d028 BS |
147 | case NVDEV_ENGINE_GR : addr = 0x0210; break; |
148 | case NVDEV_ENGINE_MSVLD : addr = 0x0270; break; | |
149 | case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break; | |
150 | case NVDEV_ENGINE_MSPPP : addr = 0x0260; break; | |
ebb945a9 BS |
151 | default: |
152 | return -EINVAL; | |
5132f377 BS |
153 | } |
154 | ||
ebb945a9 | 155 | if (!ectx->vma.node) { |
05c7145d BS |
156 | ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm, |
157 | NV_MEM_ACCESS_RW, &ectx->vma); | |
ebb945a9 BS |
158 | if (ret) |
159 | return ret; | |
4c2d4222 BS |
160 | |
161 | nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12; | |
ebb945a9 BS |
162 | } |
163 | ||
5444e770 BS |
164 | nvkm_kmap(engn); |
165 | nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4); | |
166 | nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset)); | |
5444e770 | 167 | nvkm_done(engn); |
ebb945a9 | 168 | return 0; |
5132f377 BS |
169 | } |
170 | ||
6189f1b0 BS |
171 | static int |
172 | gk104_fifo_chan_kick(struct gk104_fifo_chan *chan) | |
173 | { | |
174 | struct nvkm_object *obj = (void *)chan; | |
175 | struct gk104_fifo *fifo = (void *)obj->engine; | |
e5c5e4f5 BS |
176 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; |
177 | struct nvkm_device *device = subdev->device; | |
6189f1b0 | 178 | |
87744403 | 179 | nvkm_wr32(device, 0x002634, chan->base.chid); |
af3082b3 BS |
180 | if (nvkm_msec(device, 2000, |
181 | if (!(nvkm_rd32(device, 0x002634) & 0x00100000)) | |
182 | break; | |
183 | ) < 0) { | |
e5c5e4f5 BS |
184 | nvkm_error(subdev, "channel %d [%s] kick timeout\n", |
185 | chan->base.chid, nvkm_client_name(chan)); | |
6189f1b0 BS |
186 | return -EBUSY; |
187 | } | |
188 | ||
189 | return 0; | |
190 | } | |
191 | ||
ebb945a9 | 192 | static int |
05c7145d BS |
193 | gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend, |
194 | struct nvkm_object *object) | |
5132f377 | 195 | { |
05c7145d BS |
196 | struct gk104_fifo_base *base = (void *)parent->parent; |
197 | struct gk104_fifo_chan *chan = (void *)parent; | |
5444e770 | 198 | struct nvkm_gpuobj *engn = &base->base.gpuobj; |
ebb945a9 | 199 | u32 addr; |
6189f1b0 | 200 | int ret; |
ebb945a9 BS |
201 | |
202 | switch (nv_engidx(object->engine)) { | |
37a5d028 BS |
203 | case NVDEV_ENGINE_SW : return 0; |
204 | case NVDEV_ENGINE_CE0 : | |
205 | case NVDEV_ENGINE_CE1 : | |
206 | case NVDEV_ENGINE_CE2 : addr = 0x0000; break; | |
207 | case NVDEV_ENGINE_GR : addr = 0x0210; break; | |
208 | case NVDEV_ENGINE_MSVLD : addr = 0x0270; break; | |
209 | case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break; | |
210 | case NVDEV_ENGINE_MSPPP : addr = 0x0260; break; | |
ebb945a9 BS |
211 | default: |
212 | return -EINVAL; | |
213 | } | |
214 | ||
6189f1b0 BS |
215 | ret = gk104_fifo_chan_kick(chan); |
216 | if (ret && suspend) | |
217 | return ret; | |
5132f377 | 218 | |
01672ef4 | 219 | if (addr) { |
5444e770 BS |
220 | nvkm_kmap(engn); |
221 | nvkm_wo32(engn, addr + 0x00, 0x00000000); | |
222 | nvkm_wo32(engn, addr + 0x04, 0x00000000); | |
5444e770 | 223 | nvkm_done(engn); |
01672ef4 BS |
224 | } |
225 | ||
ebb945a9 | 226 | return 0; |
5132f377 BS |
227 | } |
228 | ||
229 | static int | |
05c7145d BS |
230 | gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, |
231 | struct nvkm_oclass *oclass, void *data, u32 size, | |
232 | struct nvkm_object **pobject) | |
5132f377 | 233 | { |
bbf8906b BS |
234 | union { |
235 | struct kepler_channel_gpfifo_a_v0 v0; | |
236 | } *args = data; | |
6189f1b0 | 237 | struct gk104_fifo *fifo = (void *)engine; |
05c7145d BS |
238 | struct gk104_fifo_base *base = (void *)parent; |
239 | struct gk104_fifo_chan *chan; | |
5444e770 | 240 | struct nvkm_gpuobj *ramfc = &base->base.gpuobj; |
ebb945a9 | 241 | u64 usermem, ioffset, ilength; |
d5624179 | 242 | u32 engines; |
ebb945a9 BS |
243 | int ret, i; |
244 | ||
53003941 | 245 | nvif_ioctl(parent, "create channel gpfifo size %d\n", size); |
bbf8906b | 246 | if (nvif_unpack(args->v0, 0, 0, false)) { |
159045cd | 247 | nvif_ioctl(parent, "create channel gpfifo vers %d " |
53003941 | 248 | "ioffset %016llx ilength %08x engine %08x\n", |
159045cd | 249 | args->v0.version, args->v0.ioffset, |
53003941 | 250 | args->v0.ilength, args->v0.engine); |
159045cd BS |
251 | if (args->v0.vm) |
252 | return -ENOENT; | |
bbf8906b BS |
253 | } else |
254 | return ret; | |
ebb945a9 | 255 | |
d5624179 BS |
256 | for (i = 0, engines = 0; i < FIFO_ENGINE_NR; i++) { |
257 | if (!nvkm_engine(parent, fifo_engine[i].subdev)) | |
258 | continue; | |
259 | engines |= (1 << i); | |
260 | } | |
261 | ||
262 | if (!args->v0.engine) { | |
263 | static struct nvkm_oclass oclass = { | |
264 | .ofuncs = &nvkm_object_ofuncs, | |
265 | }; | |
266 | args->v0.engine = engines; | |
aa35888f | 267 | return nvkm_object_old(parent, engine, &oclass, NULL, 0, pobject); |
dbff2dee BS |
268 | } |
269 | ||
d5624179 BS |
270 | engines &= args->v0.engine; |
271 | if (!engines) { | |
272 | nvif_ioctl(parent, "unsupported engines %08x\n", | |
e5c5e4f5 | 273 | args->v0.engine); |
dbff2dee | 274 | return -ENODEV; |
56fbd2b6 | 275 | } |
d5624179 | 276 | i = __ffs(engines); |
dbff2dee | 277 | |
05c7145d | 278 | ret = nvkm_fifo_channel_create(parent, engine, oclass, 1, |
159045cd | 279 | fifo->user.bar.offset, 0x200, 0, |
05c7145d | 280 | fifo_engine[i].mask, &chan); |
ebb945a9 BS |
281 | *pobject = nv_object(chan); |
282 | if (ret) | |
283 | return ret; | |
284 | ||
bbf8906b BS |
285 | args->v0.chid = chan->base.chid; |
286 | ||
05c7145d BS |
287 | nv_parent(chan)->context_attach = gk104_fifo_context_attach; |
288 | nv_parent(chan)->context_detach = gk104_fifo_context_detach; | |
dbff2dee | 289 | chan->engine = i; |
ebb945a9 BS |
290 | |
291 | usermem = chan->base.chid * 0x200; | |
bbf8906b BS |
292 | ioffset = args->v0.ioffset; |
293 | ilength = order_base_2(args->v0.ilength / 8); | |
ebb945a9 | 294 | |
5444e770 | 295 | nvkm_kmap(fifo->user.mem); |
ebb945a9 | 296 | for (i = 0; i < 0x200; i += 4) |
5444e770 BS |
297 | nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000); |
298 | nvkm_done(fifo->user.mem); | |
299 | ||
300 | nvkm_kmap(ramfc); | |
301 | nvkm_wo32(ramfc, 0x08, lower_32_bits(fifo->user.mem->addr + usermem)); | |
302 | nvkm_wo32(ramfc, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem)); | |
303 | nvkm_wo32(ramfc, 0x10, 0x0000face); | |
304 | nvkm_wo32(ramfc, 0x30, 0xfffff902); | |
305 | nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset)); | |
306 | nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16)); | |
307 | nvkm_wo32(ramfc, 0x84, 0x20400000); | |
308 | nvkm_wo32(ramfc, 0x94, 0x30000001); | |
309 | nvkm_wo32(ramfc, 0x9c, 0x00000100); | |
310 | nvkm_wo32(ramfc, 0xac, 0x0000001f); | |
311 | nvkm_wo32(ramfc, 0xe8, chan->base.chid); | |
312 | nvkm_wo32(ramfc, 0xb8, 0xf8000000); | |
313 | nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */ | |
314 | nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */ | |
5444e770 | 315 | nvkm_done(ramfc); |
ebb945a9 BS |
316 | return 0; |
317 | } | |
5132f377 | 318 | |
ebb945a9 | 319 | static int |
05c7145d | 320 | gk104_fifo_chan_init(struct nvkm_object *object) |
ebb945a9 | 321 | { |
05c7145d | 322 | struct nvkm_gpuobj *base = nv_gpuobj(object->parent); |
6189f1b0 | 323 | struct gk104_fifo *fifo = (void *)object->engine; |
05c7145d | 324 | struct gk104_fifo_chan *chan = (void *)object; |
87744403 | 325 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
ebb945a9 BS |
326 | u32 chid = chan->base.chid; |
327 | int ret; | |
5132f377 | 328 | |
05c7145d | 329 | ret = nvkm_fifo_channel_init(&chan->base); |
ebb945a9 BS |
330 | if (ret) |
331 | return ret; | |
5132f377 | 332 | |
87744403 BS |
333 | nvkm_mask(device, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16); |
334 | nvkm_wr32(device, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12); | |
87032e11 BS |
335 | |
336 | if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) { | |
87744403 | 337 | nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400); |
6189f1b0 | 338 | gk104_fifo_runlist_update(fifo, chan->engine); |
87744403 | 339 | nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400); |
87032e11 BS |
340 | } |
341 | ||
ebb945a9 BS |
342 | return 0; |
343 | } | |
5132f377 | 344 | |
ebb945a9 | 345 | static int |
05c7145d | 346 | gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend) |
ebb945a9 | 347 | { |
6189f1b0 | 348 | struct gk104_fifo *fifo = (void *)object->engine; |
05c7145d | 349 | struct gk104_fifo_chan *chan = (void *)object; |
87744403 | 350 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
ebb945a9 | 351 | u32 chid = chan->base.chid; |
5132f377 | 352 | |
87032e11 | 353 | if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) { |
87744403 | 354 | nvkm_mask(device, 0x800004 + (chid * 8), 0x00000800, 0x00000800); |
6189f1b0 | 355 | gk104_fifo_runlist_update(fifo, chan->engine); |
87032e11 | 356 | } |
5132f377 | 357 | |
87744403 | 358 | nvkm_wr32(device, 0x800000 + (chid * 8), 0x00000000); |
05c7145d | 359 | return nvkm_fifo_channel_fini(&chan->base, suspend); |
ebb945a9 | 360 | } |
5132f377 | 361 | |
89025bd4 BS |
362 | struct nvkm_ofuncs |
363 | gk104_fifo_chan_ofuncs = { | |
05c7145d BS |
364 | .ctor = gk104_fifo_chan_ctor, |
365 | .dtor = _nvkm_fifo_channel_dtor, | |
366 | .init = gk104_fifo_chan_init, | |
367 | .fini = gk104_fifo_chan_fini, | |
368 | .map = _nvkm_fifo_channel_map, | |
369 | .rd32 = _nvkm_fifo_channel_rd32, | |
370 | .wr32 = _nvkm_fifo_channel_wr32, | |
371 | .ntfy = _nvkm_fifo_channel_ntfy | |
ebb945a9 | 372 | }; |
5132f377 | 373 | |
05c7145d BS |
374 | static struct nvkm_oclass |
375 | gk104_fifo_sclass[] = { | |
89025bd4 | 376 | { KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs }, |
ebb945a9 BS |
377 | {} |
378 | }; | |
379 | ||
380 | /******************************************************************************* | |
381 | * FIFO context - instmem heap and vm setup | |
382 | ******************************************************************************/ | |
5132f377 | 383 | |
c420b2dc | 384 | static int |
05c7145d BS |
385 | gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, |
386 | struct nvkm_oclass *oclass, void *data, u32 size, | |
387 | struct nvkm_object **pobject) | |
c420b2dc | 388 | { |
05c7145d | 389 | struct gk104_fifo_base *base; |
ebb945a9 | 390 | int ret; |
c420b2dc | 391 | |
05c7145d BS |
392 | ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000, |
393 | 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base); | |
ebb945a9 BS |
394 | *pobject = nv_object(base); |
395 | if (ret) | |
396 | return ret; | |
c420b2dc | 397 | |
05c7145d BS |
398 | ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0, |
399 | &base->pgd); | |
ebb945a9 BS |
400 | if (ret) |
401 | return ret; | |
402 | ||
5444e770 BS |
403 | nvkm_kmap(&base->base.gpuobj); |
404 | nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr)); | |
405 | nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr)); | |
406 | nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff); | |
407 | nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff); | |
408 | nvkm_done(&base->base.gpuobj); | |
ebb945a9 | 409 | |
05c7145d | 410 | ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd); |
ebb945a9 BS |
411 | if (ret) |
412 | return ret; | |
c420b2dc | 413 | |
c420b2dc BS |
414 | return 0; |
415 | } | |
416 | ||
ebb945a9 | 417 | static void |
05c7145d | 418 | gk104_fifo_context_dtor(struct nvkm_object *object) |
ebb945a9 | 419 | { |
05c7145d BS |
420 | struct gk104_fifo_base *base = (void *)object; |
421 | nvkm_vm_ref(NULL, &base->vm, base->pgd); | |
422 | nvkm_gpuobj_ref(NULL, &base->pgd); | |
423 | nvkm_fifo_context_destroy(&base->base); | |
ebb945a9 BS |
424 | } |
425 | ||
05c7145d BS |
426 | static struct nvkm_oclass |
427 | gk104_fifo_cclass = { | |
ebb945a9 | 428 | .handle = NV_ENGCTX(FIFO, 0xe0), |
05c7145d BS |
429 | .ofuncs = &(struct nvkm_ofuncs) { |
430 | .ctor = gk104_fifo_context_ctor, | |
431 | .dtor = gk104_fifo_context_dtor, | |
432 | .init = _nvkm_fifo_context_init, | |
433 | .fini = _nvkm_fifo_context_fini, | |
434 | .rd32 = _nvkm_fifo_context_rd32, | |
435 | .wr32 = _nvkm_fifo_context_wr32, | |
ebb945a9 BS |
436 | }, |
437 | }; | |
438 | ||
439 | /******************************************************************************* | |
440 | * PFIFO engine | |
441 | ******************************************************************************/ | |
442 | ||
98d1e317 | 443 | static inline int |
6189f1b0 | 444 | gk104_fifo_engidx(struct gk104_fifo *fifo, u32 engn) |
98d1e317 BS |
445 | { |
446 | switch (engn) { | |
37a5d028 BS |
447 | case NVDEV_ENGINE_GR : |
448 | case NVDEV_ENGINE_CE2 : engn = 0; break; | |
449 | case NVDEV_ENGINE_MSVLD : engn = 1; break; | |
450 | case NVDEV_ENGINE_MSPPP : engn = 2; break; | |
451 | case NVDEV_ENGINE_MSPDEC: engn = 3; break; | |
452 | case NVDEV_ENGINE_CE0 : engn = 4; break; | |
453 | case NVDEV_ENGINE_CE1 : engn = 5; break; | |
454 | case NVDEV_ENGINE_MSENC : engn = 6; break; | |
98d1e317 BS |
455 | default: |
456 | return -1; | |
457 | } | |
458 | ||
459 | return engn; | |
460 | } | |
461 | ||
05c7145d | 462 | static inline struct nvkm_engine * |
6189f1b0 | 463 | gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn) |
129dcca7 BS |
464 | { |
465 | if (engn >= ARRAY_SIZE(fifo_engine)) | |
466 | return NULL; | |
6189f1b0 | 467 | return nvkm_engine(fifo, fifo_engine[engn].subdev); |
129dcca7 BS |
468 | } |
469 | ||
98d1e317 | 470 | static void |
05c7145d | 471 | gk104_fifo_recover_work(struct work_struct *work) |
98d1e317 | 472 | { |
6189f1b0 | 473 | struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault); |
87744403 | 474 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
6cf813fb | 475 | struct nvkm_engine *engine; |
98d1e317 BS |
476 | unsigned long flags; |
477 | u32 engn, engm = 0; | |
478 | u64 mask, todo; | |
479 | ||
6189f1b0 BS |
480 | spin_lock_irqsave(&fifo->base.lock, flags); |
481 | mask = fifo->mask; | |
482 | fifo->mask = 0ULL; | |
483 | spin_unlock_irqrestore(&fifo->base.lock, flags); | |
98d1e317 BS |
484 | |
485 | for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) | |
6189f1b0 | 486 | engm |= 1 << gk104_fifo_engidx(fifo, engn); |
87744403 | 487 | nvkm_mask(device, 0x002630, engm, engm); |
98d1e317 BS |
488 | |
489 | for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) { | |
6cf813fb BS |
490 | if ((engine = nvkm_device_engine(device, engn))) { |
491 | nvkm_subdev_fini(&engine->subdev, false); | |
492 | WARN_ON(nvkm_subdev_init(&engine->subdev)); | |
98d1e317 | 493 | } |
6189f1b0 | 494 | gk104_fifo_runlist_update(fifo, gk104_fifo_engidx(fifo, engn)); |
98d1e317 BS |
495 | } |
496 | ||
87744403 BS |
497 | nvkm_wr32(device, 0x00262c, engm); |
498 | nvkm_mask(device, 0x002630, engm, 0x00000000); | |
98d1e317 BS |
499 | } |
500 | ||
501 | static void | |
6189f1b0 | 502 | gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine, |
05c7145d | 503 | struct gk104_fifo_chan *chan) |
98d1e317 | 504 | { |
e5c5e4f5 BS |
505 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; |
506 | struct nvkm_device *device = subdev->device; | |
98d1e317 BS |
507 | u32 chid = chan->base.chid; |
508 | unsigned long flags; | |
509 | ||
e5c5e4f5 | 510 | nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n", |
f0290215 | 511 | nvkm_subdev_name[nv_subdev(engine)->index], chid); |
98d1e317 | 512 | |
87744403 | 513 | nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800); |
98d1e317 BS |
514 | chan->state = KILLED; |
515 | ||
6189f1b0 BS |
516 | spin_lock_irqsave(&fifo->base.lock, flags); |
517 | fifo->mask |= 1ULL << nv_engidx(engine); | |
518 | spin_unlock_irqrestore(&fifo->base.lock, flags); | |
519 | schedule_work(&fifo->fault); | |
98d1e317 BS |
520 | } |
521 | ||
3d61b967 | 522 | static int |
6189f1b0 | 523 | gk104_fifo_swmthd(struct gk104_fifo *fifo, u32 chid, u32 mthd, u32 data) |
3d61b967 | 524 | { |
05c7145d BS |
525 | struct gk104_fifo_chan *chan = NULL; |
526 | struct nvkm_handle *bind; | |
3d61b967 BS |
527 | unsigned long flags; |
528 | int ret = -EINVAL; | |
529 | ||
6189f1b0 BS |
530 | spin_lock_irqsave(&fifo->base.lock, flags); |
531 | if (likely(chid >= fifo->base.min && chid <= fifo->base.max)) | |
532 | chan = (void *)fifo->base.channel[chid]; | |
3d61b967 BS |
533 | if (unlikely(!chan)) |
534 | goto out; | |
535 | ||
f58ddf95 | 536 | bind = nvkm_namedb_get_class(nv_namedb(chan), NVIF_IOCTL_NEW_V0_SW_GF100); |
3d61b967 BS |
537 | if (likely(bind)) { |
538 | if (!mthd || !nv_call(bind->object, mthd, data)) | |
539 | ret = 0; | |
05c7145d | 540 | nvkm_namedb_put(bind); |
3d61b967 BS |
541 | } |
542 | ||
543 | out: | |
6189f1b0 | 544 | spin_unlock_irqrestore(&fifo->base.lock, flags); |
3d61b967 BS |
545 | return ret; |
546 | } | |
547 | ||
05c7145d BS |
548 | static const struct nvkm_enum |
549 | gk104_fifo_bind_reason[] = { | |
56b2f68c BS |
550 | { 0x01, "BIND_NOT_UNBOUND" }, |
551 | { 0x02, "SNOOP_WITHOUT_BAR1" }, | |
552 | { 0x03, "UNBIND_WHILE_RUNNING" }, | |
553 | { 0x05, "INVALID_RUNLIST" }, | |
554 | { 0x06, "INVALID_CTX_TGT" }, | |
555 | { 0x0b, "UNBIND_WHILE_PARKED" }, | |
556 | {} | |
557 | }; | |
558 | ||
559 | static void | |
6189f1b0 | 560 | gk104_fifo_intr_bind(struct gk104_fifo *fifo) |
56b2f68c | 561 | { |
e5c5e4f5 BS |
562 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; |
563 | struct nvkm_device *device = subdev->device; | |
87744403 | 564 | u32 intr = nvkm_rd32(device, 0x00252c); |
56b2f68c | 565 | u32 code = intr & 0x000000ff; |
e5c5e4f5 BS |
566 | const struct nvkm_enum *en = |
567 | nvkm_enum_find(gk104_fifo_bind_reason, code); | |
56b2f68c | 568 | |
e5c5e4f5 | 569 | nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : ""); |
56b2f68c BS |
570 | } |
571 | ||
05c7145d BS |
572 | static const struct nvkm_enum |
573 | gk104_fifo_sched_reason[] = { | |
e9fb9805 BS |
574 | { 0x0a, "CTXSW_TIMEOUT" }, |
575 | {} | |
576 | }; | |
577 | ||
129dcca7 | 578 | static void |
6189f1b0 | 579 | gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo) |
129dcca7 | 580 | { |
87744403 | 581 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
05c7145d BS |
582 | struct nvkm_engine *engine; |
583 | struct gk104_fifo_chan *chan; | |
129dcca7 BS |
584 | u32 engn; |
585 | ||
586 | for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) { | |
87744403 | 587 | u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04)); |
129dcca7 BS |
588 | u32 busy = (stat & 0x80000000); |
589 | u32 next = (stat & 0x07ff0000) >> 16; | |
590 | u32 chsw = (stat & 0x00008000); | |
591 | u32 save = (stat & 0x00004000); | |
592 | u32 load = (stat & 0x00002000); | |
593 | u32 prev = (stat & 0x000007ff); | |
594 | u32 chid = load ? next : prev; | |
595 | (void)save; | |
596 | ||
597 | if (busy && chsw) { | |
6189f1b0 | 598 | if (!(chan = (void *)fifo->base.channel[chid])) |
129dcca7 | 599 | continue; |
6189f1b0 | 600 | if (!(engine = gk104_fifo_engine(fifo, engn))) |
129dcca7 | 601 | continue; |
6189f1b0 | 602 | gk104_fifo_recover(fifo, engine, chan); |
129dcca7 BS |
603 | } |
604 | } | |
605 | } | |
606 | ||
885f3ced | 607 | static void |
6189f1b0 | 608 | gk104_fifo_intr_sched(struct gk104_fifo *fifo) |
885f3ced | 609 | { |
e5c5e4f5 BS |
610 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; |
611 | struct nvkm_device *device = subdev->device; | |
87744403 | 612 | u32 intr = nvkm_rd32(device, 0x00254c); |
885f3ced | 613 | u32 code = intr & 0x000000ff; |
e5c5e4f5 BS |
614 | const struct nvkm_enum *en = |
615 | nvkm_enum_find(gk104_fifo_sched_reason, code); | |
0a7760e0 | 616 | |
e5c5e4f5 | 617 | nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : ""); |
129dcca7 BS |
618 | |
619 | switch (code) { | |
620 | case 0x0a: | |
6189f1b0 | 621 | gk104_fifo_intr_sched_ctxsw(fifo); |
129dcca7 BS |
622 | break; |
623 | default: | |
624 | break; | |
625 | } | |
885f3ced BS |
626 | } |
627 | ||
628 | static void | |
6189f1b0 | 629 | gk104_fifo_intr_chsw(struct gk104_fifo *fifo) |
885f3ced | 630 | { |
e5c5e4f5 BS |
631 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; |
632 | struct nvkm_device *device = subdev->device; | |
87744403 | 633 | u32 stat = nvkm_rd32(device, 0x00256c); |
e5c5e4f5 | 634 | nvkm_error(subdev, "CHSW_ERROR %08x\n", stat); |
87744403 | 635 | nvkm_wr32(device, 0x00256c, stat); |
885f3ced BS |
636 | } |
637 | ||
638 | static void | |
6189f1b0 | 639 | gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo) |
885f3ced | 640 | { |
e5c5e4f5 BS |
641 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; |
642 | struct nvkm_device *device = subdev->device; | |
87744403 | 643 | u32 stat = nvkm_rd32(device, 0x00259c); |
e5c5e4f5 | 644 | nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat); |
885f3ced BS |
645 | } |
646 | ||
05c7145d BS |
647 | static const struct nvkm_enum |
648 | gk104_fifo_fault_engine[] = { | |
e1b6b14a | 649 | { 0x00, "GR", NULL, NVDEV_ENGINE_GR }, |
885f3ced | 650 | { 0x03, "IFB", NULL, NVDEV_ENGINE_IFB }, |
cb1567c2 BS |
651 | { 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR }, |
652 | { 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM }, | |
e1b6b14a BS |
653 | { 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO }, |
654 | { 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO }, | |
655 | { 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO }, | |
eccf7e8a | 656 | { 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD }, |
fd8666f7 | 657 | { 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP }, |
e1b6b14a | 658 | { 0x13, "PERF" }, |
37a5d028 | 659 | { 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC }, |
aedf24ff BS |
660 | { 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 }, |
661 | { 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 }, | |
e1b6b14a | 662 | { 0x17, "PMU" }, |
bd8369ec | 663 | { 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC }, |
aedf24ff | 664 | { 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 }, |
5132f377 BS |
665 | {} |
666 | }; | |
667 | ||
05c7145d BS |
668 | static const struct nvkm_enum |
669 | gk104_fifo_fault_reason[] = { | |
e1b6b14a BS |
670 | { 0x00, "PDE" }, |
671 | { 0x01, "PDE_SIZE" }, | |
672 | { 0x02, "PTE" }, | |
673 | { 0x03, "VA_LIMIT_VIOLATION" }, | |
674 | { 0x04, "UNBOUND_INST_BLOCK" }, | |
675 | { 0x05, "PRIV_VIOLATION" }, | |
676 | { 0x06, "RO_VIOLATION" }, | |
677 | { 0x07, "WO_VIOLATION" }, | |
678 | { 0x08, "PITCH_MASK_VIOLATION" }, | |
679 | { 0x09, "WORK_CREATION" }, | |
680 | { 0x0a, "UNSUPPORTED_APERTURE" }, | |
681 | { 0x0b, "COMPRESSION_FAILURE" }, | |
682 | { 0x0c, "UNSUPPORTED_KIND" }, | |
683 | { 0x0d, "REGION_VIOLATION" }, | |
684 | { 0x0e, "BOTH_PTES_VALID" }, | |
685 | { 0x0f, "INFO_TYPE_POISONED" }, | |
5132f377 BS |
686 | {} |
687 | }; | |
688 | ||
05c7145d BS |
689 | static const struct nvkm_enum |
690 | gk104_fifo_fault_hubclient[] = { | |
e1b6b14a BS |
691 | { 0x00, "VIP" }, |
692 | { 0x01, "CE0" }, | |
693 | { 0x02, "CE1" }, | |
694 | { 0x03, "DNISO" }, | |
695 | { 0x04, "FE" }, | |
696 | { 0x05, "FECS" }, | |
697 | { 0x06, "HOST" }, | |
698 | { 0x07, "HOST_CPU" }, | |
699 | { 0x08, "HOST_CPU_NB" }, | |
700 | { 0x09, "ISO" }, | |
701 | { 0x0a, "MMU" }, | |
702 | { 0x0b, "MSPDEC" }, | |
703 | { 0x0c, "MSPPP" }, | |
704 | { 0x0d, "MSVLD" }, | |
705 | { 0x0e, "NISO" }, | |
706 | { 0x0f, "P2P" }, | |
707 | { 0x10, "PD" }, | |
708 | { 0x11, "PERF" }, | |
709 | { 0x12, "PMU" }, | |
710 | { 0x13, "RASTERTWOD" }, | |
711 | { 0x14, "SCC" }, | |
712 | { 0x15, "SCC_NB" }, | |
713 | { 0x16, "SEC" }, | |
714 | { 0x17, "SSYNC" }, | |
aedf24ff | 715 | { 0x18, "GR_CE" }, |
e1b6b14a BS |
716 | { 0x19, "CE2" }, |
717 | { 0x1a, "XV" }, | |
718 | { 0x1b, "MMU_NB" }, | |
719 | { 0x1c, "MSENC" }, | |
720 | { 0x1d, "DFALCON" }, | |
721 | { 0x1e, "SKED" }, | |
722 | { 0x1f, "AFALCON" }, | |
5132f377 BS |
723 | {} |
724 | }; | |
725 | ||
05c7145d BS |
726 | static const struct nvkm_enum |
727 | gk104_fifo_fault_gpcclient[] = { | |
e1b6b14a BS |
728 | { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" }, |
729 | { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" }, | |
730 | { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" }, | |
731 | { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" }, | |
732 | { 0x0c, "RAST" }, | |
733 | { 0x0d, "GCC" }, | |
734 | { 0x0e, "GPCCS" }, | |
735 | { 0x0f, "PROP_0" }, | |
736 | { 0x10, "PROP_1" }, | |
737 | { 0x11, "PROP_2" }, | |
738 | { 0x12, "PROP_3" }, | |
739 | { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" }, | |
740 | { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" }, | |
741 | { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" }, | |
742 | { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" }, | |
743 | { 0x1f, "GPM" }, | |
744 | { 0x20, "LTP_UTLB_0" }, | |
745 | { 0x21, "LTP_UTLB_1" }, | |
746 | { 0x22, "LTP_UTLB_2" }, | |
747 | { 0x23, "LTP_UTLB_3" }, | |
748 | { 0x24, "GPC_RGG_UTLB" }, | |
5132f377 BS |
749 | {} |
750 | }; | |
751 | ||
/* Decode and report an MMU fault raised by fault unit 'unit', then try
 * to recover the channel it happened on.
 *
 * The fault information registers live at 0x002800 + unit * 0x10:
 * instance pointer, faulting VA (lo/hi), and a status word packing the
 * GPC index, client ID, read/write flag, HUB-vs-GPC flag and reason.
 */
static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	struct nvkm_object *engctx = NULL, *object;
	struct nvkm_engine *engine = NULL;
	const struct nvkm_enum *er, *eu, *ec;
	char gpcid[8] = "";

	er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
	/* client IDs have separate namespaces for HUB and GPC faults;
	 * only GPC faults carry a meaningful GPC index */
	if (hub) {
		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		/* NOTE(review): these zero-mask read-modify-writes rewrite
		 * the register with its current value -- presumably a kick
		 * to make the unit re-fetch its page directory; confirm
		 * against hardware documentation. */
		case NVDEV_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			/* a real engine: look up the context bound to the
			 * faulting instance so we can name/recover it */
			engine = nvkm_engine(fifo, eu->data2);
			if (engine)
				engctx = nvkm_engctx_get(engine, inst);
			break;
		}
	}

	/* inst is in 4KiB units, hence the << 12 when printing */
	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", -1, (u64)inst << 12,
		   nvkm_client_name(engctx));

	/* walk up the engine-context's parent chain looking for the
	 * owning GPFIFO channel object, and schedule its recovery */
	object = engctx;
	while (object) {
		switch (nv_mclass(object)) {
		case KEPLER_CHANNEL_GPFIFO_A:
		case MAXWELL_CHANNEL_GPFIFO_A:
			gk104_fifo_recover(fifo, engine, (void *)object);
			break;
		}
		object = object->parent;
	}

	nvkm_engctx_put(engctx);
}
820 | ||
/* Names for the PBDMA INTR_0 status bits (0x040108 + unit * 0x2000),
 * used by gk104_fifo_intr_pbdma_0() to build a human-readable report.
 * Note bits 0x01000000 and 0x10000000 have no name here and will only
 * show numerically if they ever fire.
 */
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};
e2b34fa0 | 854 | |
/* Handle the INTR_0 interrupt set of PBDMA unit 'unit' (register block
 * at 0x040000 + unit * 0x2000): give DEVICE (software-method) faults a
 * chance to be emulated on the CPU, report whatever remains, and ack
 * the full pending status.
 */
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000)); /* INTREN */
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;	/* bits still worth reporting */
	char msg[128];

	/* DEVICE: a method the hardware can't handle; if it's a software
	 * method we can execute it here and suppress the error report */
	if (stat & 0x00800000) {
		if (!gk104_fifo_swmthd(fifo, chid, mthd, data))
			show &= ~0x00800000;
		/* NOTE(review): written unconditionally, whether or not the
		 * swmthd was handled -- appears to retire the method from
		 * the PBDMA; confirm against hardware documentation */
		nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d "
				   "mthd %04x data %08x\n",
			   unit, show, msg, chid,
			   nvkm_client_name_for_fifo_chid(&fifo->base, chid),
			   subc, mthd, data);
	}

	/* ack everything that was pending, including suppressed bits */
	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}
887 | ||
/* Names for the PBDMA INTR_1 (HCE) status bits (0x040148 + unit * 0x2000),
 * used by gk104_fifo_intr_pbdma_1() when reporting.
 */
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};
896 | ||
/* Handle the INTR_1 (HCE) interrupt set of PBDMA unit 'unit': report
 * any enabled pending bits along with two auxiliary registers, then
 * ack the pending status.
 */
static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000)); /* INTREN */
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		/* 0x040150/0x040154: extra HCE state dumped raw -- exact
		 * meaning not established here */
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}
917 | ||
138b873f | 918 | static void |
6189f1b0 | 919 | gk104_fifo_intr_runlist(struct gk104_fifo *fifo) |
138b873f | 920 | { |
87744403 BS |
921 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
922 | u32 mask = nvkm_rd32(device, 0x002a00); | |
138b873f BS |
923 | while (mask) { |
924 | u32 engn = __ffs(mask); | |
6189f1b0 | 925 | wake_up(&fifo->engine[engn].wait); |
87744403 | 926 | nvkm_wr32(device, 0x002a00, 1 << engn); |
138b873f BS |
927 | mask &= ~(1 << engn); |
928 | } | |
929 | } | |
930 | ||
/* Engine (non-stall) interrupt: forward to the FIFO user-event
 * machinery so listeners registered via uevent are notified.
 */
static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}
936 | ||
/* Top-level PFIFO interrupt handler.  Reads the pending-status word
 * (0x002100) masked by the enable register (0x002140) and dispatches
 * each known bit to its handler, acking each bit in 0x002100 as it is
 * serviced.  Anything left unrecognised at the end is reported once
 * and then masked off so it cannot storm.
 */
static void
gk104_fifo_intr(struct nvkm_subdev *subdev)
{
	struct gk104_fifo *fifo = (void *)subdev;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {	/* bind error */
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {	/* PIO error: log only */
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {	/* scheduler error */
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {	/* channel switch */
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {	/* FB flush timeout: log only */
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {	/* LB error: log only */
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {	/* dropped MMU fault */
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {	/* MMU fault(s) pending */
		/* 0x00259c: one bit per faulting unit (shadows outer mask) */
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {	/* PBDMA interrupt(s) pending */
		/* 0x0025a0: one bit per PBDMA unit (shadows outer mask) */
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {	/* runlist update complete */
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {	/* engine (non-stall) event */
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		/* unknown bits: report, then disable them to avoid a storm */
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}
c420b2dc | 1027 | |
9bd2ddba | 1028 | static void |
05c7145d | 1029 | gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index) |
9bd2ddba | 1030 | { |
05c7145d | 1031 | struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent); |
87744403 BS |
1032 | struct nvkm_device *device = fifo->engine.subdev.device; |
1033 | nvkm_mask(device, 0x002140, 0x80000000, 0x80000000); | |
9bd2ddba BS |
1034 | } |
1035 | ||
1036 | static void | |
05c7145d | 1037 | gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index) |
9bd2ddba | 1038 | { |
05c7145d | 1039 | struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent); |
87744403 BS |
1040 | struct nvkm_device *device = fifo->engine.subdev.device; |
1041 | nvkm_mask(device, 0x002140, 0x80000000, 0x00000000); | |
9bd2ddba BS |
1042 | } |
1043 | ||
/* User-event vtable: shared ctor from the core, init/fini gate the
 * engine interrupt enable bit on listener count.
 */
static const struct nvkm_event_func
gk104_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = gk104_fifo_uevent_init,
	.fini = gk104_fifo_uevent_fini,
};
1050 | ||
649ec925 | 1051 | int |
05c7145d | 1052 | gk104_fifo_fini(struct nvkm_object *object, bool suspend) |
649ec925 | 1053 | { |
6189f1b0 | 1054 | struct gk104_fifo *fifo = (void *)object; |
87744403 | 1055 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
649ec925 BS |
1056 | int ret; |
1057 | ||
6189f1b0 | 1058 | ret = nvkm_fifo_fini(&fifo->base, suspend); |
649ec925 BS |
1059 | if (ret) |
1060 | return ret; | |
1061 | ||
1062 | /* allow mmu fault interrupts, even when we're not using fifo */ | |
87744403 | 1063 | nvkm_mask(device, 0x002140, 0x10000000, 0x10000000); |
649ec925 BS |
1064 | return 0; |
1065 | } | |
1066 | ||
/* Hardware bring-up: enable the PBDMA units, program their interrupt
 * enables, point the hardware at the user-visible channel area, and
 * unmask (almost) all top-level FIFO interrupts.
 */
int
gk104_fifo_init(struct nvkm_object *object)
{
	struct gk104_fifo *fifo = (void *)object;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret, i;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	/* enable all available PBDMA units */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	/* read back: the hardware only keeps bits for units that exist */
	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* PBDMA[n] */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	/* point hardware at user.mem via its BAR offset (4KiB units) */
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	/* ack stale interrupts, then enable everything except bit 31
	 * (engine events -- gated by uevent_init/fini on demand) */
	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
	return 0;
}
1103 | ||
1104 | void | |
05c7145d | 1105 | gk104_fifo_dtor(struct nvkm_object *object) |
a763951a | 1106 | { |
6189f1b0 | 1107 | struct gk104_fifo *fifo = (void *)object; |
a763951a BS |
1108 | int i; |
1109 | ||
6189f1b0 BS |
1110 | nvkm_gpuobj_unmap(&fifo->user.bar); |
1111 | nvkm_gpuobj_ref(NULL, &fifo->user.mem); | |
a763951a BS |
1112 | |
1113 | for (i = 0; i < FIFO_ENGINE_NR; i++) { | |
6189f1b0 BS |
1114 | nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[1]); |
1115 | nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[0]); | |
a763951a BS |
1116 | } |
1117 | ||
6189f1b0 | 1118 | nvkm_fifo_destroy(&fifo->base); |
a763951a BS |
1119 | } |
1120 | ||
/* Constructor: allocate the base FIFO object, the per-engine runlist
 * double-buffers, and the user-visible channel area; register the
 * user-event source and wire up the interrupt/class vtables.
 *
 * On mid-construction failure this returns the error and relies on
 * the core calling gk104_fifo_dtor() on the partially-built object
 * (unreferenced gpuobj pointers are NULL-safe there).
 */
int
gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
{
	struct gk104_fifo_impl *impl = (void *)oclass;
	struct gk104_fifo *fifo;
	int ret, i;

	ret = nvkm_fifo_create(parent, engine, oclass, 0,
			       impl->channels - 1, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	/* deferred channel-recovery work, scheduled from fault handling */
	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);

	/* two runlist buffers per engine so updates can ping-pong */
	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000,
				      0, &fifo->engine[i].runlist[0]);
		if (ret)
			return ret;

		ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000,
				      0, &fifo->engine[i].runlist[1]);
		if (ret)
			return ret;

		init_waitqueue_head(&fifo->engine[i].wait);
	}

	/* user-visible area: 0x200 bytes per channel, zero-initialised */
	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, impl->channels * 0x200,
			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_map(fifo->user.mem, NV_MEM_ACCESS_RW,
			      &fifo->user.bar);
	if (ret)
		return ret;

	ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &fifo->base.uevent);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;	/* PMC enable bit for PFIFO */
	nv_subdev(fifo)->intr = gk104_fifo_intr;
	nv_engine(fifo)->cclass = &gk104_fifo_cclass;
	nv_engine(fifo)->sclass = gk104_fifo_sclass;
	return 0;
}
1172 | ||
05c7145d BS |
/* Object class for the GK104 (0xe0) FIFO engine: 4096 channels,
 * lifecycle handled by the ctor/dtor/init/fini above.
 */
struct nvkm_oclass *
gk104_fifo_oclass = &(struct gk104_fifo_impl) {
	.base.handle = NV_ENGINE(FIFO, 0xe0),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_ctor,
		.dtor = gk104_fifo_dtor,
		.init = gk104_fifo_init,
		.fini = gk104_fifo_fini,
	},
	.channels = 4096,
}.base;