/*
 * Copyright 2015 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include "gf100.h"
#include "ctxgf100.h"

#include <subdev/secboot.h>

#include <nvif/class.h>

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

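/* Read back the number of ROP units; on GM200 this appears to come from the
 * same per-FBP count register (0x12006c) that is used for the active-FBP
 * setup further down.
 */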
int
gm200_gr_rops(struct gf100_gr *gr)
{
	return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c);
}

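/* Mirror MMU-related configuration from PFB (0x100cxx) into the GPC MMU
 * broadcast registers (0x4188xx).  The exact meaning of these registers is
 * not documented; the values appear to follow traces of the binary driver.
 */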
void
gm200_gr_init_gpc_mmu(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0xf0001fff);
	nvkm_wr32(device, 0x418890, 0x00000000);
	nvkm_wr32(device, 0x418894, 0x00000000);

	nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
	nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
	nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
}

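/* Program the number of active FBPs, read back from 0x12006c, into the
 * low bits of the zrop and crop configuration registers.
 */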
static void
gm200_gr_init_rop_active_fbps(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const u32 fbp_count = nvkm_rd32(device, 0x12006c);
	nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
}

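/* Main PGRAPH initialisation: program the GPC/TPC topology, enable exception
 * reporting across the engine, then hand off to gf100_gr_init_ctxctl() to
 * boot the FECS/GPCCS context-switching firmware.  Most raw register values
 * below are undocumented and presumed to come from traces of the binary
 * driver.
 */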
int
gm200_gr_init(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
	u32 data[TPC_MAX / 8] = {};
	u8  tpcnr[GPC_MAX];
	int gpc, tpc, rop;
	int i;

	gr->func->init_gpc_mmu(gr);

	gf100_gr_mmio(gr, gr->fuc_sw_nonctx);

	gm107_gr_init_bios(gr);

	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);

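	/* Build the per-TPC index table: global TPC slots are distributed
	 * round-robin across the GPCs, one 4-bit entry per TPC, eight
	 * entries per 32-bit word, then broadcast to all GPCs.
	 */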
	memset(data, 0x00, sizeof(data));
	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
		do {
			gpc = (gpc + 1) % gr->gpc_nr;
		} while (!tpcnr[gpc]);
		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;

		data[i / 8] |= tpc << ((i % 8) * 4);
	}

	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);

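	/* Per-GPC topology: screen-tile row offset and local TPC count,
	 * total TPC count, and the DIV_ROUND_UP(0x800000, tpc_total) value
	 * computed above, which is also broadcast below.
	 */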
	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
							 gr->tpc_total);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
	}

	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
	nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));

	gr->func->init_rop_active_fbps(gr);

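	/* Enable PGRAPH FIFO access, clear and unmask pending interrupts,
	 * and turn on exception reporting in the front-end units; the
	 * 0xc0000000 writes below appear to be per-unit error status/enable
	 * registers.
	 */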
	nvkm_wr32(device, 0x400500, 0x00010001);
	nvkm_wr32(device, 0x400100, 0xffffffff);
	nvkm_wr32(device, 0x40013c, 0xffffffff);
	nvkm_wr32(device, 0x400124, 0x00000002);
	nvkm_wr32(device, 0x409c24, 0x000e0000);
	nvkm_wr32(device, 0x405848, 0xc0000000);
	nvkm_wr32(device, 0x40584c, 0x00000001);
	nvkm_wr32(device, 0x404000, 0xc0000000);
	nvkm_wr32(device, 0x404600, 0xc0000000);
	nvkm_wr32(device, 0x408030, 0xc0000000);
	nvkm_wr32(device, 0x404490, 0xc0000000);
	nvkm_wr32(device, 0x406018, 0xc0000000);
	nvkm_wr32(device, 0x407020, 0x40000000);
	nvkm_wr32(device, 0x405840, 0xc0000000);
	nvkm_wr32(device, 0x405844, 0x00ffffff);
	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);

	gr->func->init_ppc_exceptions(gr);

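	/* Enable exception/error reporting in each GPC and its TPCs. */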
	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
		}
		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
	}

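	/* Enable exception/error reporting in each ROP unit. */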
	for (rop = 0; rop < gr->rop_nr; rop++) {
		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
	}

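	/* Ack any pending trap state, unmask the remaining trap/interrupt
	 * enables, set up the zero-bandwidth-clear (ZBC) tables, and boot
	 * the context-switching firmware.
	 */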
	nvkm_wr32(device, 0x400108, 0xffffffff);
	nvkm_wr32(device, 0x400138, 0xffffffff);
	nvkm_wr32(device, 0x400118, 0xffffffff);
	nvkm_wr32(device, 0x400130, 0xffffffff);
	nvkm_wr32(device, 0x40011c, 0xffffffff);
	nvkm_wr32(device, 0x400134, 0xffffffff);

	nvkm_wr32(device, 0x400054, 0x2c350f63);

	gf100_gr_zbc_init(gr);

	return gf100_gr_init_ctxctl(gr);
}

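/* Common constructor for GM20x-era PGRAPH.  FECS/GPCCS microcode is only
 * loaded from external firmware files here when the corresponding falcon is
 * not managed by the secure-boot subdev (which loads signed firmware itself);
 * the gr/sw_* files provide the register and context init lists that older
 * chipsets carried as built-in tables.
 */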
int
gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
	      int index, struct nvkm_gr **pgr)
{
	struct gf100_gr *gr;
	int ret;

	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
		return -ENOMEM;
	*pgr = &gr->base;

	ret = gf100_gr_ctor(func, device, index, gr);
	if (ret)
		return ret;

	/* Load firmwares for non-secure falcons */
	if (!nvkm_secboot_is_managed(device->secboot,
				     NVKM_SECBOOT_FALCON_FECS)) {
		if ((ret = gf100_gr_ctor_fw(gr, "gr/fecs_inst", &gr->fuc409c)) ||
		    (ret = gf100_gr_ctor_fw(gr, "gr/fecs_data", &gr->fuc409d)))
			return ret;
	}
	if (!nvkm_secboot_is_managed(device->secboot,
				     NVKM_SECBOOT_FALCON_GPCCS)) {
		if ((ret = gf100_gr_ctor_fw(gr, "gr/gpccs_inst", &gr->fuc41ac)) ||
		    (ret = gf100_gr_ctor_fw(gr, "gr/gpccs_data", &gr->fuc41ad)))
			return ret;
	}

	if ((ret = gk20a_gr_av_to_init(gr, "gr/sw_nonctx", &gr->fuc_sw_nonctx)) ||
	    (ret = gk20a_gr_aiv_to_init(gr, "gr/sw_ctx", &gr->fuc_sw_ctx)) ||
	    (ret = gk20a_gr_av_to_init(gr, "gr/sw_bundle_init", &gr->fuc_bundle)) ||
	    (ret = gk20a_gr_av_to_method(gr, "gr/sw_method_init", &gr->fuc_method)))
		return ret;

	return 0;
}

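/* Per-chipset function table and the object classes exposed for GM200:
 * 2D, inline-to-memory, Maxwell 3D (MAXWELL_B) and Maxwell compute.
 */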
static const struct gf100_gr_func
gm200_gr = {
	.init = gm200_gr_init,
	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
	.init_rop_active_fbps = gm200_gr_init_rop_active_fbps,
	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
	.rops = gm200_gr_rops,
	.tpc_nr = 4,
	.ppc_nr = 2,
	.grctx = &gm200_grctx,
	.sclass = {
		{ -1, -1, FERMI_TWOD_A },
		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
		{ -1, -1, MAXWELL_B, &gf100_fermi },
		{ -1, -1, MAXWELL_COMPUTE_B },
		{}
	}
};

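/* Entry point referenced by the nvkm device chipset tables to instantiate
 * PGRAPH on GM200.
 */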
int
gm200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
	return gm200_gr_new_(&gm200_gr, device, index, pgr);
}