]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
drm/nouveau/gr: remove dependence on namedb/engctx lookup
[mirror_ubuntu-zesty-kernel.git] / drivers / gpu / drm / nouveau / nvkm / engine / gr / gk20a.c
1 /*
2 * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22 #include "gk20a.h"
23 #include "ctxgf100.h"
24
25 #include <nvif/class.h>
26 #include <subdev/timer.h>
27
/* Graphics object classes exposed to userspace channels on GK20A. */
static struct nvkm_oclass
gk20a_gr_sclass[] = {
	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
	{ KEPLER_INLINE_TO_MEMORY_A, &nvkm_object_ofuncs },
	{ KEPLER_C, &gf100_fermi_ofuncs },
	{ KEPLER_COMPUTE_A, &nvkm_object_ofuncs },
	{}	/* zero terminator */
};
36
/*
 * Free a pack/init list built by one of the gk20a_gr_*_to_init()
 * converters below.  The whole list lives in a single vzalloc()
 * allocation, so a single vfree() releases it (vfree(NULL) is a no-op).
 */
static void
gk20a_gr_init_dtor(struct gf100_gr_pack *pack)
{
	vfree(pack);
}
42
/* One record of NVIDIA's "av" firmware image format: an (address, value)
 * pair describing a single register write. */
struct gk20a_fw_av
{
	u32 addr;
	u32 data;
};
48
49 static struct gf100_gr_pack *
50 gk20a_gr_av_to_init(struct gf100_gr_fuc *fuc)
51 {
52 struct gf100_gr_init *init;
53 struct gf100_gr_pack *pack;
54 const int nent = (fuc->size / sizeof(struct gk20a_fw_av));
55 int i;
56
57 pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
58 if (!pack)
59 return ERR_PTR(-ENOMEM);
60
61 init = (void *)(pack + 2);
62
63 pack[0].init = init;
64
65 for (i = 0; i < nent; i++) {
66 struct gf100_gr_init *ent = &init[i];
67 struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc->data)[i];
68
69 ent->addr = av->addr;
70 ent->data = av->data;
71 ent->count = 1;
72 ent->pitch = 1;
73 }
74
75 return pack;
76 }
77
/* One record of NVIDIA's "aiv" firmware image format: an
 * (address, index, data) triple.  Note that the index field is not
 * consumed by gk20a_gr_aiv_to_init() below. */
struct gk20a_fw_aiv
{
	u32 addr;
	u32 index;
	u32 data;
};
84
85 static struct gf100_gr_pack *
86 gk20a_gr_aiv_to_init(struct gf100_gr_fuc *fuc)
87 {
88 struct gf100_gr_init *init;
89 struct gf100_gr_pack *pack;
90 const int nent = (fuc->size / sizeof(struct gk20a_fw_aiv));
91 int i;
92
93 pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
94 if (!pack)
95 return ERR_PTR(-ENOMEM);
96
97 init = (void *)(pack + 2);
98
99 pack[0].init = init;
100
101 for (i = 0; i < nent; i++) {
102 struct gf100_gr_init *ent = &init[i];
103 struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)fuc->data)[i];
104
105 ent->addr = av->addr;
106 ent->data = av->data;
107 ent->count = 1;
108 ent->pitch = 1;
109 }
110
111 return pack;
112 }
113
114 static struct gf100_gr_pack *
115 gk20a_gr_av_to_method(struct gf100_gr_fuc *fuc)
116 {
117 struct gf100_gr_init *init;
118 struct gf100_gr_pack *pack;
119 /* We don't suppose we will initialize more than 16 classes here... */
120 static const unsigned int max_classes = 16;
121 const int nent = (fuc->size / sizeof(struct gk20a_fw_av));
122 int i, classidx = 0;
123 u32 prevclass = 0;
124
125 pack = vzalloc((sizeof(*pack) * max_classes) +
126 (sizeof(*init) * (nent + 1)));
127 if (!pack)
128 return ERR_PTR(-ENOMEM);
129
130 init = (void *)(pack + max_classes);
131
132 for (i = 0; i < nent; i++) {
133 struct gf100_gr_init *ent = &init[i];
134 struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc->data)[i];
135 u32 class = av->addr & 0xffff;
136 u32 addr = (av->addr & 0xffff0000) >> 14;
137
138 if (prevclass != class) {
139 pack[classidx].init = ent;
140 pack[classidx].type = class;
141 prevclass = class;
142 if (++classidx >= max_classes) {
143 vfree(pack);
144 return ERR_PTR(-ENOSPC);
145 }
146 }
147
148 ent->addr = addr;
149 ent->data = av->data;
150 ent->count = 1;
151 ent->pitch = 1;
152 }
153
154 return pack;
155 }
156
157 int
158 gk20a_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
159 struct nvkm_oclass *oclass, void *data, u32 size,
160 struct nvkm_object **pobject)
161 {
162 int err;
163 struct gf100_gr *gr;
164 struct gf100_gr_fuc fuc;
165
166 err = gf100_gr_ctor(parent, engine, oclass, data, size, pobject);
167 if (err)
168 return err;
169
170 gr = (void *)*pobject;
171
172 err = gf100_gr_ctor_fw(gr, "sw_nonctx", &fuc);
173 if (err)
174 return err;
175 gr->fuc_sw_nonctx = gk20a_gr_av_to_init(&fuc);
176 gf100_gr_dtor_fw(&fuc);
177 if (IS_ERR(gr->fuc_sw_nonctx))
178 return PTR_ERR(gr->fuc_sw_nonctx);
179
180 err = gf100_gr_ctor_fw(gr, "sw_ctx", &fuc);
181 if (err)
182 return err;
183 gr->fuc_sw_ctx = gk20a_gr_aiv_to_init(&fuc);
184 gf100_gr_dtor_fw(&fuc);
185 if (IS_ERR(gr->fuc_sw_ctx))
186 return PTR_ERR(gr->fuc_sw_ctx);
187
188 err = gf100_gr_ctor_fw(gr, "sw_bundle_init", &fuc);
189 if (err)
190 return err;
191 gr->fuc_bundle = gk20a_gr_av_to_init(&fuc);
192 gf100_gr_dtor_fw(&fuc);
193 if (IS_ERR(gr->fuc_bundle))
194 return PTR_ERR(gr->fuc_bundle);
195
196 err = gf100_gr_ctor_fw(gr, "sw_method_init", &fuc);
197 if (err)
198 return err;
199 gr->fuc_method = gk20a_gr_av_to_method(&fuc);
200 gf100_gr_dtor_fw(&fuc);
201 if (IS_ERR(gr->fuc_method))
202 return PTR_ERR(gr->fuc_method);
203
204 return 0;
205 }
206
/*
 * GK20A destructor: release the four firmware-derived pack lists
 * allocated by gk20a_gr_ctor() (gk20a_gr_init_dtor is a vfree, so
 * fields left NULL by a partially-failed ctor are harmless), then
 * chain to the common gf100 destructor.
 */
void
gk20a_gr_dtor(struct nvkm_object *object)
{
	struct gf100_gr *gr = (void *)object;

	gk20a_gr_init_dtor(gr->fuc_method);
	gk20a_gr_init_dtor(gr->fuc_bundle);
	gk20a_gr_init_dtor(gr->fuc_sw_ctx);
	gk20a_gr_init_dtor(gr->fuc_sw_nonctx);

	gf100_gr_dtor(object);
}
219
/*
 * Wait for the FECS and GPCCS falcons to finish scrubbing their
 * memories after reset.
 *
 * Polls 0x40910c (FECS) then 0x41a10c (GPCCS) until bits 0x00000006
 * clear, with an nvkm_msec(..., 2000, ...) timeout on each — the bits
 * presumably indicate scrubbing in progress (NOTE(review): inferred
 * from the error messages; confirm against register docs).
 *
 * Returns 0 when both falcons are ready, -ETIMEDOUT otherwise.
 */
static int
gk20a_gr_wait_mem_scrubbing(struct gf100_gr *gr)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x40910c) & 0x00000006))
			break;
	) < 0) {
		nvkm_error(subdev, "FECS mem scrubbing timeout\n");
		return -ETIMEDOUT;
	}

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x41a10c) & 0x00000006))
			break;
	) < 0) {
		nvkm_error(subdev, "GPCCS mem scrubbing timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
244
/*
 * Program the SM hardware-warning (HWW) exception report masks for
 * GK20A (registers 0x419e44/0x419e4c).  NOTE(review): individual bit
 * meanings are not visible here — values taken as-is from the
 * register writes.
 */
static void
gk20a_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	nvkm_wr32(device, 0x419e44, 0x1ffffe);
	nvkm_wr32(device, 0x419e4c, 0x7f);
}
252
/*
 * Bring up the GK20A graphics engine: apply the sw_nonctx firmware
 * register list, configure GPC/TPC layout and zcull mapping, enable
 * interrupts and exceptions, then hand off to the gf100 ctxctl
 * (context-switch firmware) init.  The ordering of the register
 * programming below is hardware-mandated; do not reorder.
 *
 * Returns 0 on success or a negative errno from any init stage.
 */
int
gk20a_gr_init(struct nvkm_object *object)
{
	struct gk20a_gr_oclass *oclass = (void *)object->oclass;
	struct gf100_gr *gr = (void *)object;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	/* per-TPC zcull scaling factor: ceil(0x00800000 / tpc_total) */
	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
	u32 data[TPC_MAX / 8] = {};
	u8 tpcnr[GPC_MAX];
	int gpc, tpc;
	int ret, i;

	ret = nvkm_gr_init(&gr->base);
	if (ret)
		return ret;

	/* Clear SCC RAM */
	nvkm_wr32(device, 0x40802c, 0x1);

	/* Apply the non-context "golden" register state from firmware. */
	gf100_gr_mmio(gr, gr->fuc_sw_nonctx);

	ret = gk20a_gr_wait_mem_scrubbing(gr);
	if (ret)
		return ret;

	ret = gf100_gr_wait_idle(gr);
	if (ret)
		return ret;

	/* MMU debug buffer */
	nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
	nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);

	/* Optional per-chip GPC MMU setup hook (e.g. GM20B override). */
	if (oclass->init_gpc_mmu)
		oclass->init_gpc_mmu(gr);

	/* Set the PE as stream master */
	nvkm_mask(device, 0x503018, 0x1, 0x1);

	/* Zcull init: distribute TPC indices round-robin across GPCs,
	 * packing one 4-bit TPC id per slot into the data[] words. */
	memset(data, 0x00, sizeof(data));
	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
		do {
			/* skip GPCs whose TPC budget is exhausted */
			gpc = (gpc + 1) % gr->gpc_nr;
		} while (!tpcnr[gpc]);
		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;

		data[i / 8] |= tpc << ((i % 8) * 4);
	}

	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);

	/* Per-GPC topology: TPC counts and the zcull scaling factor. */
	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
			  gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
			  gr->tpc_total);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
	}

	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);

	/* Enable FIFO access */
	nvkm_wr32(device, 0x400500, 0x00010001);

	/* Enable interrupts */
	nvkm_wr32(device, 0x400100, 0xffffffff);
	nvkm_wr32(device, 0x40013c, 0xffffffff);

	/* Enable FECS error interrupts */
	nvkm_wr32(device, 0x409c24, 0x000f0000);

	/* Enable hardware warning exceptions */
	nvkm_wr32(device, 0x404000, 0xc0000000);
	nvkm_wr32(device, 0x404600, 0xc0000000);

	/* Optional per-chip HWW ESR report-mask hook. */
	if (oclass->set_hww_esr_report_mask)
		oclass->set_hww_esr_report_mask(gr);

	/* Enable TPC exceptions per GPC */
	nvkm_wr32(device, 0x419d0c, 0x2);
	nvkm_wr32(device, 0x41ac94, (((1 << gr->tpc_total) - 1) & 0xff) << 16);

	/* Reset and enable all exceptions */
	nvkm_wr32(device, 0x400108, 0xffffffff);
	nvkm_wr32(device, 0x400138, 0xffffffff);
	nvkm_wr32(device, 0x400118, 0xffffffff);
	nvkm_wr32(device, 0x400130, 0xffffffff);
	nvkm_wr32(device, 0x40011c, 0xffffffff);
	nvkm_wr32(device, 0x400134, 0xffffffff);

	gf100_gr_zbc_init(gr);

	return gf100_gr_init_ctxctl(gr);
}
352
/*
 * GK20A GR engine class descriptor.  Extends the gf100 oclass with the
 * GK20A-specific HWW ESR report-mask hook; exported as a plain
 * nvkm_oclass pointer (&...gf100.base) for the engine registry.
 */
struct nvkm_oclass *
gk20a_gr_oclass = &(struct gk20a_gr_oclass) {
	.gf100 = {
		.base.handle = NV_ENGINE(GR, 0xea),
		.base.ofuncs = &(struct nvkm_ofuncs) {
			.ctor = gk20a_gr_ctor,
			.dtor = gk20a_gr_dtor,
			.init = gk20a_gr_init,
			.fini = _nvkm_gr_fini,
		},
		.cclass = &gk20a_grctx_oclass,
		.sclass = gk20a_gr_sclass,
		.ppc_nr = 1,
	},
	.set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask,
}.gf100.base;