]>
Commit | Line | Data |
---|---|---|
6ee73861 | 1 | /* |
ebb945a9 | 2 | * Copyright 2012 Red Hat Inc. |
6ee73861 | 3 | * |
ebb945a9 BS |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
6ee73861 | 10 | * |
ebb945a9 BS |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. | |
6ee73861 | 13 | * |
ebb945a9 BS |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
6ee73861 | 21 | * |
ebb945a9 | 22 | * Authors: Ben Skeggs |
6ee73861 | 23 | */ |
05c7145d | 24 | #include "nv04.h" |
9a65a38c BS |
25 | #include "channv04.h" |
26 | #include "regsnv04.h" | |
6ee73861 | 27 | |
8f0649b5 | 28 | #include <core/client.h> |
02a841d4 | 29 | #include <core/ramht.h> |
d8e83994 | 30 | #include <subdev/instmem.h> |
ebb945a9 | 31 | #include <subdev/timer.h> |
61570911 | 32 | #include <engine/sw.h> |
ebb945a9 | 33 | |
/* RAMFC layout for NV04: each entry describes one saved/restored piece of
 * CACHE1 channel state as { bits, shift, RAMFC offset, register shift, reg }.
 * NOTE(review): column meaning inferred from usage; confirm against the
 * struct nv04_fifo_ramfc declaration in nv04.h.  Terminated by an empty entry.
 */
static const struct nv04_fifo_ramfc
nv04_fifo_ramfc[] = {
	{ 32,  0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 16,  0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 32,  0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{ 32,  0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
	{}
};
/* Halt PFIFO command processing and take the fifo lock with interrupts
 * disabled; the saved IRQ flags are handed back through *pflags so that
 * nv04_fifo_start() can restore them.
 */
void
nv04_fifo_pause(struct nvkm_fifo *base, unsigned long *pflags)
__acquires(fifo->base.lock)
{
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags;

	spin_lock_irqsave(&fifo->base.lock, flags);
	*pflags = flags;

	/* disable cache reassignment and stop the puller */
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

	/* in some cases the puller may be left in an inconsistent state
	 * if you try to stop it while it's busy translating handles.
	 * sometimes you get a CACHE_ERROR, sometimes it just fails
	 * silently; sending incorrect instance offsets to PGRAPH after
	 * it's started up again.
	 *
	 * to avoid this, we invalidate the most recently calculated
	 * instance.
	 */
	/* wait up to 2ms for any in-flight hash lookup to finish */
	nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
		if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
			break;
	);

	/* acknowledge any CACHE_ERROR raised by a failed translation */
	if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
			NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	/* invalidate the most recently calculated instance (see above) */
	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}
6ee73861 | 82 | |
/* Resume PFIFO command processing and release the fifo lock, restoring
 * the IRQ flags previously saved by nv04_fifo_pause() via *pflags.
 */
void
nv04_fifo_start(struct nvkm_fifo *base, unsigned long *pflags)
__releases(fifo->base.lock)
{
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags = *pflags;

	/* re-enable the puller, then cache reassignment */
	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}
96 | ||
ebb945a9 BS |
97 | static const char * |
98 | nv_dma_state_err(u32 state) | |
5178d40d | 99 | { |
ebb945a9 BS |
100 | static const char * const desc[] = { |
101 | "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE", | |
102 | "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK" | |
103 | }; | |
104 | return desc[(state >> 29) & 0x7]; | |
5178d40d BS |
105 | } |
106 | ||
/* Try to handle a method in the software (sw) engine rather than in
 * hardware.  Returns true if the method was consumed by software.
 */
static bool
nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
{
	struct nvkm_sw *sw = device->sw;
	const int subc = (addr & 0x0000e000) >> 13;	/* subchannel index */
	const int mthd = (addr & 0x00001ffc);		/* method offset */
	const u32 mask = 0x0000000f << (subc * 4);	/* this subc's engine field */
	u32 engine = nvkm_rd32(device, 0x003280);
	bool handled = false;

	/* note: the first two cases deliberately fall through */
	switch (mthd) {
	case 0x0000 ... 0x0000: /* subchannel's engine -> software */
		nvkm_wr32(device, 0x003280, (engine &= ~mask));
		fallthrough;
	case 0x0180 ... 0x01fc: /* handle -> instance */
		data = nvkm_rd32(device, 0x003258) & 0x0000ffff;
		fallthrough;
	case 0x0100 ... 0x017c:
	case 0x0200 ... 0x1ffc: /* pass method down to sw */
		if (!(engine & mask) && sw)
			handled = nvkm_sw_mthd(sw, chid, subc, mthd, data);
		break;
	default:
		break;
	}

	return handled;
}
135 | ||
/* Handle a CACHE_ERROR interrupt: read the offending method/data out of
 * CACHE1, give software a chance to handle it, log it otherwise, then ack
 * the interrupt and restart the fetcher past the bad entry.
 */
static void
nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	u32 pull0 = nvkm_rd32(device, 0x003250);
	u32 mthd, data;
	int ptr;

	/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
	 * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
	 * show that it wraps around to the start at GET=0x800.. No clue as to
	 * why..
	 */
	ptr = (get & 0x7ff) >> 2;

	/* the METHOD/DATA register layout moved on NV40+ */
	if (device->card_type < NV_40) {
		mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
	} else {
		mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
	}

	/* if software couldn't (or wasn't allowed to) handle it, log it */
	if (!(pull0 & 0x00000100) ||
	    !nv04_fifo_swmthd(device, chid, mthd, data)) {
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "CACHE_ERROR - "
			   "ch %d [%s] subc %d mthd %04x data %08x\n",
			   chid, chan ? chan->object.client->name : "unknown",
			   (mthd >> 13) & 7, mthd & 0x1ffc, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	/* ack the interrupt with the pusher disabled */
	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
	nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	/* step GET past the offending entry with PUSH0 momentarily off,
	 * and invalidate the most recent hash lookup
	 */
	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);

	/* resume DMA push and pull */
	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
		nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}
186 | ||
/* Handle a DMA_PUSHER interrupt: log the pusher state for the faulting
 * channel, then unwedge the pusher by skipping to the PUT pointer(s) and
 * acknowledging the interrupt.
 */
static void
nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 dma_get = nvkm_rd32(device, 0x003244);
	u32 dma_put = nvkm_rd32(device, 0x003240);
	u32 push = nvkm_rd32(device, 0x003220);
	u32 state = nvkm_rd32(device, 0x003228);
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	const char *name;

	chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
	name = chan ? chan->object.client->name : "unknown";
	if (device->card_type == NV_50) {
		/* NV50 has 40-bit get/put (high-order regs) plus an
		 * indirect buffer (IB) get/put pair
		 */
		u32 ho_get = nvkm_rd32(device, 0x003328);
		u32 ho_put = nvkm_rd32(device, 0x003320);
		u32 ib_get = nvkm_rd32(device, 0x003334);
		u32 ib_put = nvkm_rd32(device, 0x003330);

		nvkm_error(subdev, "DMA_PUSHER - "
			   "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
			   "ib_put %08x state %08x (err: %s) push %08x\n",
			   chid, name, ho_get, dma_get, ho_put, dma_put,
			   ib_get, ib_put, state, nv_dma_state_err(state),
			   push);

		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
		nvkm_wr32(device, 0x003364, 0x00000000);
		/* skip the pusher forward to PUT to get it unstuck */
		if (dma_get != dma_put || ho_get != ho_put) {
			nvkm_wr32(device, 0x003244, dma_put);
			nvkm_wr32(device, 0x003328, ho_put);
		} else
		if (ib_get != ib_put)
			nvkm_wr32(device, 0x003334, ib_put);
	} else {
		nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
			   "state %08x (err: %s) push %08x\n",
			   chid, name, dma_get, dma_put, state,
			   nv_dma_state_err(state), push);

		if (dma_get != dma_put)
			nvkm_wr32(device, 0x003244, dma_put);
	}
	nvkm_fifo_chan_put(&fifo->base, flags, &chan);

	/* clear state, re-enable push access, ack the interrupt */
	nvkm_wr32(device, 0x003228, 0x00000000);
	nvkm_wr32(device, 0x003220, 0x00000001);
	nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}
238 | ||
/* Top-level PFIFO interrupt handler: dispatch CACHE_ERROR, DMA_PUSHER and
 * SEMAPHORE interrupts (plus NV50-only bits), warn about and mask anything
 * unrecognised, then restore cache reassignment.
 */
void
nv04_fifo_intr(struct nvkm_fifo *base)
{
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
	u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
	u32 reassign, chid, get, sem;

	/* disable cache reassignment while we service the interrupt */
	reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & (fifo->base.nr - 1);
	get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);

	if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
		nv04_fifo_cache_error(fifo, chid, get);
		stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
	}

	if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
		nv04_fifo_dma_pusher(fifo, chid);
		stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
	}

	if (stat & NV_PFIFO_INTR_SEMAPHORE) {
		stat &= ~NV_PFIFO_INTR_SEMAPHORE;
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);

		sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
		nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

		/* skip the semaphore method and restart the puller */
		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	if (device->card_type == NV_50) {
		if (stat & 0x00000010) {
			stat &= ~0x00000010;
			nvkm_wr32(device, 0x002100, 0x00000010);
		}

		/* 0x40000000: non-stall interrupt, forwarded as a uevent */
		if (stat & 0x40000000) {
			nvkm_wr32(device, 0x002100, 0x40000000);
			nvkm_fifo_uevent(&fifo->base);
			stat &= ~0x40000000;
		}
	}

	/* anything left is unexpected: log it, mask it off and ack it */
	if (stat) {
		nvkm_warn(subdev, "intr %08x\n", stat);
		nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
		nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
	}

	nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
}
c420b2dc | 297 | |
/* Bring up PFIFO: point the hardware at RAMHT/RAMRO/RAMFC in instance
 * memory, program timeslice/delay defaults, clear and enable interrupts,
 * and enable command processing.
 */
void
nv04_fifo_init(struct nvkm_fifo *base)
{
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	struct nvkm_ramht *ramht = imem->ramht;
	struct nvkm_memory *ramro = imem->ramro;
	struct nvkm_memory *ramfc = imem->ramfc;

	nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

	/* RAMHT: search stride, table size and base address (in 256B units) */
	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
					    ((ramht->bits - 9) << 16) |
					    (ramht->gpuobj->addr >> 8));
	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
	nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);

	/* park CACHE1 on the reserved (inactive) last channel */
	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);

	/* ack any stale interrupts, then enable them all */
	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
}
ebb945a9 | 326 | |
/* Common constructor for NV04-family fifos.  Allocates the nv04_fifo
 * wrapper, records the RAMFC layout, and initialises the base fifo with
 * @nr channels.  *pfifo is assigned before nvkm_fifo_ctor() so the caller
 * can tear the object down through the normal path on failure.
 * Returns 0 on success or a negative errno.
 */
int
nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
	       int index, int nr, const struct nv04_fifo_ramfc *ramfc,
	       struct nvkm_fifo **pfifo)
{
	struct nv04_fifo *fifo;
	int ret;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	fifo->ramfc = ramfc;
	*pfifo = &fifo->base;

	ret = nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
	if (ret)
		return ret;

	/* reserve the last channel id; CACHE1 idles on it (see init) */
	set_bit(nr - 1, fifo->base.mask); /* inactive channel */
	return 0;
}
347 | ||
/* Function table binding the NV04 fifo implementation and its DMA
 * channel class.
 */
static const struct nvkm_fifo_func
nv04_fifo = {
	.init = nv04_fifo_init,
	.intr = nv04_fifo_intr,
	.pause = nv04_fifo_pause,
	.start = nv04_fifo_start,
	.chan = {
		&nv04_fifo_dma_oclass,
		NULL
	},
};
13de7f46 BS |
359 | |
/* Public constructor: an NV04 fifo has 16 channels and uses the NV04
 * RAMFC layout defined above.
 */
int
nv04_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	return nv04_fifo_new_(&nv04_fifo, device, index, 16,
			      nv04_fifo_ramfc, pfifo);
}
365 | } |