/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/console.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_drv.h"

static struct drm_driver driver;

#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

#define GEN_CHV_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  CHV_PIPE_C_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   CHV_TRANSCODER_C_OFFSET, }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
			     CHV_PALETTE_C_OFFSET }

#define CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

#define IVB_CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }

#define BDW_COLORS \
	.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
#define CHV_COLORS \
	.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
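
/*
 * A note on the LUT sizes above: these are hardware table depths, not a
 * driver choice. The odd-looking CHV values (65/257) are most plausibly
 * 64/256 interpolation segments plus one closing endpoint entry; treat
 * that rationale as an educated guess rather than documented fact.
 */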

static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

#define GEN7_FEATURES \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1, \
	GEN_DEFAULT_PIPEOFFSETS, \
	IVB_CURSOR_OFFSETS

static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
};
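
/*
 * The "last one wins" trick above leans on C99 designated initializers:
 * if the same field is initialized twice in one initializer list, the
 * later value overrides the earlier one, so a struct can pull in the
 * GEN7_FEATURES defaults and then override individual fields. A minimal
 * sketch of the idea (hypothetical struct, illustration only):
 *
 *	struct demo { int a, b; };
 *	#define DEMO_DEFAULTS .a = 1, .b = 2
 *	static const struct demo d = { DEMO_DEFAULTS, .b = 0 };
 *	// d ends up as { .a = 1, .b = 0 }: the macro's .b = 2 is overridden
 */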

#define VLV_FEATURES \
	.gen = 7, .num_pipes = 2, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.display_mmio_offset = VLV_DISPLAY_BASE, \
	GEN_DEFAULT_PIPEOFFSETS, \
	CURSOR_OFFSETS

static const struct intel_device_info intel_valleyview_m_info = {
	VLV_FEATURES,
	.is_valleyview = 1,
	.is_mobile = 1,
};

static const struct intel_device_info intel_valleyview_d_info = {
	VLV_FEATURES,
	.is_valleyview = 1,
};

#define HSW_FEATURES \
	GEN7_FEATURES, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
	.has_ddi = 1, \
	.has_fpga_dbg = 1

static const struct intel_device_info intel_haswell_d_info = {
	HSW_FEATURES,
	.is_haswell = 1,
};

static const struct intel_device_info intel_haswell_m_info = {
	HSW_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
};

#define BDW_FEATURES \
	HSW_FEATURES, \
	BDW_COLORS

static const struct intel_device_info intel_broadwell_d_info = {
	BDW_FEATURES,
	.gen = 8,
	.is_broadwell = 1,
};

static const struct intel_device_info intel_broadwell_m_info = {
	BDW_FEATURES,
	.gen = 8, .is_mobile = 1,
	.is_broadwell = 1,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
	BDW_FEATURES,
	.gen = 8,
	.is_broadwell = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
	BDW_FEATURES,
	.gen = 8, .is_mobile = 1,
	.is_broadwell = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_cherryview_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.is_cherryview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	GEN_CHV_PIPEOFFSETS,
	CURSOR_OFFSETS,
	CHV_COLORS,
};

static const struct intel_device_info intel_skylake_info = {
	BDW_FEATURES,
	.is_skylake = 1,
	.gen = 9,
};

static const struct intel_device_info intel_skylake_gt3_info = {
	BDW_FEATURES,
	.is_skylake = 1,
	.gen = 9,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_broxton_info = {
	.is_preliminary = 1,
	.is_broxton = 1,
	.gen = 9,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.num_pipes = 3,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	.has_pooled_eu = 0,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
	BDW_COLORS,
};

static const struct intel_device_info intel_kabylake_info = {
	BDW_FEATURES,
	.is_kabylake = 1,
	.gen = 9,
};

static const struct intel_device_info intel_kabylake_gt3_info = {
	BDW_FEATURES,
	.is_kabylake = 1,
	.gen = 9,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

/*
 * Make sure any device matches here are ordered from most specific to
 * most general. For example, since the Quanta match is based on the
 * subsystem and subvendor IDs, we need it to come before the more
 * general IVB PCI ID matches, otherwise we'll use the wrong info struct
 * above.
 */
static const struct pci_device_id pciidlist[] = {
	INTEL_I830_IDS(&intel_i830_info),
	INTEL_I845G_IDS(&intel_845g_info),
	INTEL_I85X_IDS(&intel_i85x_info),
	INTEL_I865G_IDS(&intel_i865g_info),
	INTEL_I915G_IDS(&intel_i915g_info),
	INTEL_I915GM_IDS(&intel_i915gm_info),
	INTEL_I945G_IDS(&intel_i945g_info),
	INTEL_I945GM_IDS(&intel_i945gm_info),
	INTEL_I965G_IDS(&intel_i965g_info),
	INTEL_G33_IDS(&intel_g33_info),
	INTEL_I965GM_IDS(&intel_i965gm_info),
	INTEL_GM45_IDS(&intel_gm45_info),
	INTEL_G45_IDS(&intel_g45_info),
	INTEL_PINEVIEW_IDS(&intel_pineview_info),
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
	INTEL_HSW_D_IDS(&intel_haswell_d_info),
	INTEL_HSW_M_IDS(&intel_haswell_m_info),
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
	INTEL_CHV_IDS(&intel_cherryview_info),
	INTEL_SKL_GT1_IDS(&intel_skylake_info),
	INTEL_SKL_GT2_IDS(&intel_skylake_info),
	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
	INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
	INTEL_BXT_IDS(&intel_broxton_info),
	INTEL_KBL_GT1_IDS(&intel_kabylake_info),
	INTEL_KBL_GT2_IDS(&intel_kabylake_info),
	INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
	INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);

static unsigned int i915_load_fail_count;

bool __i915_inject_load_failure(const char *func, int line)
{
	if (i915_load_fail_count >= i915.inject_load_failure)
		return false;

	if (++i915_load_fail_count == i915.inject_load_failure) {
		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
			 i915.inject_load_failure, func, line);
		return true;
	}

	return false;
}
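
/*
 * Illustrative usage, assuming the standard i915 module parameter
 * syntax: booting with i915.inject_load_failure=3 (or loading with
 * "modprobe i915 inject_load_failure=3") makes the third
 * i915_inject_load_failure() checkpoint reached during load report a
 * failure; earlier checkpoints pass through, and once the count has
 * been consumed all later calls return false again. This lets test
 * setups walk the driver's error-unwind paths one checkpoint at a time.
 */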

#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
		    "providing the dmesg log by booting with drm.debug=0xf"

void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...)
{
	static bool shown_bug_once;
	struct device *dev = dev_priv->dev->dev;
	bool is_error = level[1] <= KERN_ERR[1];
	bool is_debug = level[1] == KERN_DEBUG[1];
	struct va_format vaf;
	va_list args;

	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
		   __builtin_return_address(0), &vaf);

	if (is_error && !shown_bug_once) {
		dev_notice(dev, "%s", FDO_BUG_MSG);
		shown_bug_once = true;
	}

	va_end(args);
}

static bool i915_error_injected(struct drm_i915_private *dev_priv)
{
	return i915.inject_load_failure &&
	       i915_load_fail_count == i915.inject_load_failure;
}

#define i915_load_error(dev_priv, fmt, ...)				     \
	__i915_printk(dev_priv,						     \
		      i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
		      fmt, ##__VA_ARGS__)

static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN5(dev)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = PCH_LPT;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	}

	return ret;
}

static void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for the VMM, which then
	 * only needs to expose the ISA bridge to let the driver know what
	 * real hardware is underneath. This is a requirement from the
	 * virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there may be an
	 * irrelevant ISA bridge in the system. To work reliably, we should
	 * scan through all the ISA bridge devices and check for the first
	 * match, instead of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
					!IS_KABYLAKE(dev));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
					!IS_KABYLAKE(dev));
			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
				   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
				    pch->subsystem_vendor ==
					    PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
				    pch->subsystem_device ==
					    PCI_SUBDEVICE_ID_QEMU)) {
				dev_priv->pch_type = intel_virt_detect_pch(dev);
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}
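
/*
 * Worked example of the masking above (illustrative device id, not a
 * claim about a particular SKU): INTEL_PCH_DEVICE_ID_MASK keeps only
 * the high byte of the ISA bridge's PCI device id, which identifies
 * the PCH family. A LynxPoint bridge with, say, id 0x8c4b masks down
 * to 0x8c00 == INTEL_PCH_LPT_DEVICE_ID_TYPE, so every LPT SKU lands
 * in the same branch.
 */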

bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 6)
		return false;

	if (i915.semaphores >= 0)
		return i915.semaphores;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}
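
/*
 * For reference: i915.semaphores is a tri-state module parameter. The
 * default is negative ("auto"), letting the platform checks above
 * decide; 0 and 1 force semaphores off and on respectively, which is
 * why the explicit ">= 0" test short-circuits everything that follows.
 */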

static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = dev->pdev->revision;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_engine_initialized(&dev_priv->engine[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_engine_initialized(&dev_priv->engine[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_engine_initialized(&dev_priv->engine[VECS]);
		break;
	case I915_PARAM_HAS_BSD2:
		value = intel_engine_initialized(&dev_priv->engine[VCS2]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev);
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev_priv);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version(dev_priv);
		break;
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
		value = 1;
		break;
	case I915_PARAM_MMAP_VERSION:
		value = 1;
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = INTEL_INFO(dev)->subslice_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev)->eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev);
		break;
	case I915_PARAM_HAS_EXEC_SOFTPIN:
		value = 1;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (put_user(value, param->value))
		return -EFAULT;

	return 0;
}
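
/*
 * Userspace reaches this through DRM_IOCTL_I915_GETPARAM. A minimal,
 * illustrative libdrm-style query (error handling elided, fd is an
 * already-open device node):
 *
 *	int val = 0;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_HAS_BSD2,
 *		.value = &val,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && val)
 *		use_bsd2 = true;	// second BSD (video) engine present
 */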

static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Set up MCHBAR if possible; flag it for teardown if we had to enable it. */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex, but that would
	 * lead to a locking inversion with the driver load path. And the
	 * access here is completely racy anyway. So don't bother with
	 * locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

static void i915_gem_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * Neither the BIOS, ourselves nor any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload. Afterwards we then clean up the
	 * GEM state tracking, flushing off the requests and leaving the
	 * system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */
	if (HAS_HW_CONTEXTS(dev)) {
		int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
		WARN_ON(reset && reset != -ENODEV);
	}

	mutex_lock(&dev->struct_mutex);
	i915_gem_reset(dev);
	i915_gem_cleanup_engines(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);

	WARN_ON(!list_empty(&to_i915(dev)->context_list));
}

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	ret = intel_bios_init(dev_priv);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have more than one VGA card, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(dev_priv);

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	intel_guc_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);

	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	i915_gem_fini(dev);
cleanup_irq:
	intel_guc_fini(dev);
	drm_irq_uninstall(dev);
	intel_teardown_gmbus(dev);
cleanup_csr:
	intel_csr_ucode_fini(dev_priv);
	intel_power_domains_fini(dev_priv);
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}
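
/*
 * Note the unwind convention above: the cleanup labels run in strict
 * reverse order of the setup calls, so a new init step added to this
 * function should come with a matching label slotted in just before
 * the label that undoes the step preceding it.
 */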

#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = ggtt->mappable_base;
	ap->ranges[0].size = ggtt->mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}
#else
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	return 0;
}
#endif

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = &dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 dev_priv->dev->pdev->revision,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
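
/*
 * The PRINT_S/PRINT_FLAG pairing above is a classic x-macro: the same
 * DEV_INFO_FOR_EACH_FLAG(f, sep) list is expanded twice, once into a
 * run of "%s" conversions appended to the format string and once into
 * the matching argument list. Roughly (abridged, for illustration):
 *
 *	DRM_DEBUG_DRIVER("... flags=" "%s" "%s" ...,
 *			 info->gen, ...,
 *			 info->is_mobile ? "is_mobile," : "",
 *			 info->has_llc ? "has_llc," : "",
 *			 ...);
 *
 * so the format string and its arguments can never fall out of sync
 * with the flag list itself.
 */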

static void cherryview_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	u32 fuse, eu_dis;

	info = (struct intel_device_info *)&dev_priv->info;
	fuse = I915_READ(CHV_FUSE_GT);

	info->slice_total = 1;

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		info->subslice_per_slice++;
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
				 CHV_FGT_EU_DIS_SS0_R1_MASK);
		info->eu_total += 8 - hweight32(eu_dis);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		info->subslice_per_slice++;
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
				 CHV_FGT_EU_DIS_SS1_R1_MASK);
		info->eu_total += 8 - hweight32(eu_dis);
	}

	info->subslice_total = info->subslice_per_slice;
	/*
	 * CHV is expected to always have a uniform distribution of EUs
	 * across subslices.
	 */
	info->eu_per_subslice = info->subslice_total ?
				info->eu_total / info->subslice_total :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	info->has_slice_pg = 0;
	info->has_subslice_pg = (info->subslice_total > 1);
	info->has_eu_pg = (info->eu_per_subslice > 2);
}
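
/*
 * Worked example with a hypothetical fuse value: if CHV_FGT_DISABLE_SS1
 * is set and two EU-disable bits are set for SS0, the code above yields
 * slice_total = 1, subslice_per_slice = subslice_total = 1 and
 * eu_total = 8 - 2 = 6, hence eu_per_subslice = 6. That in turn gives
 * has_subslice_pg = false (only one subslice) but has_eu_pg = true
 * (6 > 2 EUs, i.e. more than one EU pair).
 */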

static void gen9_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	int s_max = 3, ss_max = 4, eu_max = 8;
	int s, ss;
	u32 fuse2, s_enable, ss_disable, eu_disable;
	u8 eu_mask = 0xff;

	info = (struct intel_device_info *)&dev_priv->info;
	fuse2 = I915_READ(GEN8_FUSE2);
	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
		   GEN8_F2_S_ENA_SHIFT;
	ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
		     GEN9_F2_SS_DIS_SHIFT;

	info->slice_total = hweight32(s_enable);
	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	info->subslice_per_slice = ss_max - hweight32(ss_disable);
	info->subslice_total = info->slice_total *
			       info->subslice_per_slice;

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EUs.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(s_enable & (0x1 << s)))
			/* skip disabled slice */
			continue;

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < ss_max; ss++) {
			int eu_per_ss;

			if (ss_disable & (0x1 << ss))
				/* skip disabled subslice */
				continue;

			eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
						      eu_mask);

			/*
			 * Record which subslice(s) have 7 EUs, so we
			 * can tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				info->subslice_7eu[s] |= 1 << ss;

			info->eu_total += eu_per_ss;
		}
	}

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	info->eu_per_subslice = info->subslice_total ?
				DIV_ROUND_UP(info->eu_total,
					     info->subslice_total) : 0;
	/*
	 * SKL supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
			      (info->slice_total > 1));
	info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
	info->has_eu_pg = (info->eu_per_subslice > 2);

	if (IS_BROXTON(dev)) {
#define IS_SS_DISABLED(_ss_disable, ss) (_ss_disable & (0x1 << ss))
		/*
		 * There is a HW issue in 2x6 fused down parts that requires
		 * Pooled EU to be enabled as a WA. The pool configuration
		 * changes depending upon which subslice is fused down. This
		 * doesn't affect if the device has all 3 subslices enabled.
		 */
		/* WaEnablePooledEuFor2x6:bxt */
		info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
				       (info->subslice_per_slice == 2 &&
					INTEL_REVID(dev) < BXT_REVID_C0));

		info->min_eu_in_pool = 0;
		if (info->has_pooled_eu) {
			if (IS_SS_DISABLED(ss_disable, 0) ||
			    IS_SS_DISABLED(ss_disable, 2))
				info->min_eu_in_pool = 3;
			else if (IS_SS_DISABLED(ss_disable, 1))
				info->min_eu_in_pool = 6;
			else
				info->min_eu_in_pool = 9;
		}
#undef IS_SS_DISABLED
	}
}

static void broadwell_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	const int s_max = 3, ss_max = 3, eu_max = 8;
	int s, ss;
	u32 fuse2, eu_disable[s_max], s_enable, ss_disable;

	fuse2 = I915_READ(GEN8_FUSE2);
	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;

	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	info = (struct intel_device_info *)&dev_priv->info;
	info->slice_total = hweight32(s_enable);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	info->subslice_per_slice = ss_max - hweight32(ss_disable);
	info->subslice_total = info->slice_total * info->subslice_per_slice;

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EUs.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(s_enable & (0x1 << s)))
			/* skip disabled slice */
			continue;

		for (ss = 0; ss < ss_max; ss++) {
			u32 n_disabled;

			if (ss_disable & (0x1 << ss))
				/* skip disabled subslice */
				continue;

			n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (eu_max - n_disabled == 7)
				info->subslice_7eu[s] |= 1 << ss;

			info->eu_total += eu_max - n_disabled;
		}
	}

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	info->eu_per_subslice = info->subslice_total ?
		DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	info->has_slice_pg = (info->slice_total > 1);
	info->has_subslice_pg = 0;
	info->has_eu_pg = 0;
}
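
/*
 * The eu_disable[] reassembly above is the subtle part: BDW packs one
 * EU-disable mask per slice (ss_max * eu_max = 24 bits each) back to
 * back across three 32-bit fuse registers, so the masks for slices 1
 * and 2 each straddle a register boundary. The low bits come from one
 * register, the high bits from the next, and the second piece is
 * shifted up by (32 - shift) so it lands directly above the first
 * before the two are OR'ed together.
 */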

/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (e.g. reading fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been set up as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
static void intel_device_info_runtime_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	enum pipe pipe;

	info = (struct intel_device_info *)&dev_priv->info;

	/*
	 * Skylake and Broxton currently don't expose the topmost plane as its
	 * use is exclusive with the legacy cursor and we only want to expose
	 * one of those, not both. Until we can safely expose the topmost plane
	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
	 * we don't expose the topmost plane at all to prevent ABI breakage
	 * down the line.
	 */
	if (IS_BROXTON(dev)) {
		info->num_sprites[PIPE_A] = 2;
		info->num_sprites[PIPE_B] = 2;
		info->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 2;
	else
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 1;

	if (i915.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (info->num_pipes > 0 &&
		   (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
		   HAS_PCH_SPLIT(dev)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (dev_priv->pch_type == PCH_CPT &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
	} else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 disabled_mask = 0;
		bool invalid;
		int num_bits;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			disabled_mask |= BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			disabled_mask |= BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			disabled_mask |= BIT(PIPE_C);

		num_bits = hweight8(disabled_mask);

		switch (disabled_mask) {
		case BIT(PIPE_A):
		case BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_C):
			invalid = true;
			break;
		default:
			invalid = false;
		}

		if (num_bits > info->num_pipes || invalid)
			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
				  disabled_mask);
		else
			info->num_pipes -= num_bits;
	}

	/* Initialize slice/subslice/EU info */
	if (IS_CHERRYVIEW(dev))
		cherryview_sseu_info_init(dev);
	else if (IS_BROADWELL(dev))
		broadwell_sseu_info_init(dev);
	else if (INTEL_INFO(dev)->gen >= 9)
		gen9_sseu_info_init(dev);

	info->has_snoop = !info->has_llc;

	/* Snooping is broken on BXT A stepping. */
	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		info->has_snoop = false;

	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
			 info->has_slice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
			 info->has_subslice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
			 info->has_eu_pg ? "y" : "n");

	i915.enable_execlists =
		intel_sanitize_enable_execlists(dev_priv,
						i915.enable_execlists);

	/*
	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
	 * user's requested state against the hardware/driver capabilities. We
	 * do this now so that we can print out any log messages once rather
	 * than every time we check intel_enable_ppgtt().
	 */
	i915.enable_ppgtt =
		intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
}
1514 | ||
static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D).
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C).
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	dev_priv->gpu_error.hangcheck_wq =
		alloc_ordered_workqueue("i915-hangcheck", 0);
	if (dev_priv->gpu_error.hangcheck_wq == NULL)
		goto out_free_dp_wq;

	return 0;

out_free_dp_wq:
	destroy_workqueue(dev_priv->hotplug.dp_wq);
out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/**
 * i915_driver_init_early - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_init_early(struct drm_i915_private *dev_priv,
				  const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	int ret = 0;

	if (i915_inject_load_failure())
		return -ENODEV;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, match_info, sizeof(*device_info));
	device_info->device_id = dev_priv->drm.pdev->device;

	BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
	device_info->gen_mask = BIT(device_info->gen - 1);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->modeset_restore_lock);
	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	ret = intel_gvt_init(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(&dev_priv->drm);

	intel_pm_setup(&dev_priv->drm);
	intel_init_dpio(dev_priv);
	intel_power_domains_init(dev_priv);
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	i915_gem_load_init(&dev_priv->drm);

	intel_display_crc_init(&dev_priv->drm);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev_priv))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	return 0;

err_workqueues:
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_load_cleanup(dev_priv->dev);
	i915_workqueues_cleanup(dev_priv);
}

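/* Map the register BAR (sized per generation, see below) and enable MCHBAR
 * access, so that MMIO reads/writes work for the rest of the init sequence. */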
static int i915_mmio_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap so that it
	 * doesn't clobber the GTT, which we map with ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (INTEL_INFO(dev)->gen < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (dev_priv->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);

	return 0;
}

static void i915_mmio_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_teardown_mchbar(dev);
	pci_iounmap(dev->pdev, dev_priv->regs);
}

/**
 * i915_driver_init_mmio - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (i915_get_bridge_dev(dev))
		return -EIO;

	ret = i915_mmio_setup(dev);
	if (ret < 0)
		goto put_bridge;

	intel_uncore_init(dev_priv);

	return 0;

put_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_uncore_fini(dev_priv);
	i915_mmio_cleanup(dev);
	pci_dev_put(dev_priv->bridge_dev);
}

/**
 * i915_driver_init_hw - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t aperture_size;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_device_info_runtime_init(dev);

	ret = i915_ggtt_init_hw(dev);
	if (ret)
		return ret;

	ret = i915_ggtt_enable_hw(dev);
	if (ret) {
		DRM_ERROR("failed to enable GGTT\n");
		goto out_ggtt;
	}

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto out_ggtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_ggtt;
	}

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev)) {
		ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
		ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}

	aperture_size = ggtt->mappable_end;

	ggtt->mappable =
		io_mapping_create_wc(ggtt->mappable_base,
				     aperture_size);
	if (!ggtt->mappable) {
		ret = -EIO;
		goto out_ggtt;
	}

	ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
				      aperture_size);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_uncore_sanitize(dev_priv);

	intel_opregion_setup(dev_priv);

	i915_gem_load_init_fences(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev)) {
		if (pci_enable_msi(dev->pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI\n");
	}

	return 0;

out_ggtt:
	i915_ggtt_cleanup_hw(dev);

	return ret;
}

/**
 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	pm_qos_remove_request(&dev_priv->pm_qos);
	arch_phys_wc_del(ggtt->mtrr);
	io_mapping_free(ggtt->mappable);
	i915_ggtt_cleanup_hw(dev);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	i915_gem_shrinker_init(dev_priv);

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev_priv))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		i915_setup_sysfs(dev);
	} else
		DRM_ERROR("Failed to register driver for userspace access!\n");

	if (INTEL_INFO(dev_priv)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	if (IS_GEN5(dev_priv))
		intel_gpu_ips_init(dev_priv);

	i915_audio_component_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45. Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	i915_audio_component_cleanup(dev_priv);

	intel_gpu_ips_teardown();
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv->dev);
	i915_debugfs_unregister(dev_priv);
	drm_dev_unregister(dev_priv->dev);

	i915_gem_shrinker_cleanup(dev_priv);
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver load routine has to do several things:
 * - drive output discovery via intel_modeset_init()
 * - initialize the memory manager
 * - allocate initial config memory
 * - setup the DRM framebuffer with the allocated memory
 */
static int i915_driver_load(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct drm_i915_private *dev_priv;
	int ret;

	ret = -ENOMEM;
	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv)
		ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
	if (ret) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "[" DRM_NAME ":%s] allocation failed\n", __func__);
		kfree(dev_priv);
		return ret;
	}

	/* Must be set before calling __i915_printk */
	dev_priv->drm.pdev = pdev;
	dev_priv->drm.dev_private = dev_priv;
	dev_priv->dev = &dev_priv->drm;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_free_priv;

	pci_set_drvdata(pdev, &dev_priv->drm);

	ret = i915_driver_init_early(dev_priv, ent);
	if (ret < 0)
		goto out_pci_disable;

	intel_runtime_pm_get(dev_priv);

	ret = i915_driver_init_mmio(dev_priv);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_init_hw(dev_priv);
	if (ret < 0)
		goto out_cleanup_mmio;

	/*
	 * TODO: move the vblank init and parts of modeset init steps into one
	 * of the i915_driver_init_/i915_driver_register functions according
	 * to the role/effect of the given init step.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes) {
		ret = drm_vblank_init(dev_priv->dev,
				      INTEL_INFO(dev_priv)->num_pipes);
		if (ret)
			goto out_cleanup_hw;
	}

	ret = i915_load_modeset_init(dev_priv->dev);
	if (ret < 0)
		goto out_cleanup_vblank;

	i915_driver_register(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	intel_runtime_pm_put(dev_priv);

	return 0;

out_cleanup_vblank:
	drm_vblank_cleanup(dev_priv->dev);
out_cleanup_hw:
	i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
	i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
	intel_runtime_pm_put(dev_priv);
	i915_driver_cleanup_early(dev_priv);
out_pci_disable:
	pci_disable_device(pdev);
out_free_priv:
	i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
	drm_dev_unref(&dev_priv->drm);
	return ret;
}

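/**
 * i915_driver_unload - reverse the setup done in i915_driver_load()
 * @dev: DRM device
 *
 * Idle the GPU, unregister the driver from userspace and unwind the
 * modeset/hw/mmio/early init stages in reverse order.
 */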
static int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_fbdev_fini(dev);

	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	i915_driver_unregister(dev_priv);

	drm_vblank_cleanup(dev);

	intel_modeset_cleanup(dev);

	/*
	 * Free the memory space allocated for the child device
	 * config parsed from VBT.
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}
	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

	vga_switcheroo_unregister_client(dev->pdev);
	vga_client_register(dev->pdev, NULL, NULL, NULL);

	intel_csr_ucode_fini(dev_priv);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_destroy_error_state(dev);

	/* Flush any outstanding unpin_work. */
	flush_workqueue(dev_priv->wq);

	intel_guc_fini(dev);
	i915_gem_fini(dev);
	intel_fbc_cleanup_cfb(dev_priv);

	intel_power_domains_fini(dev_priv);

	i915_driver_cleanup_hw(dev_priv);
	i915_driver_cleanup_mmio(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	i915_driver_cleanup_early(dev_priv);

	return 0;
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;

	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

static void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}

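/* Give every encoder with a ->suspend hook a chance to quiesce before the
 * display goes down; done under the modeset locks. */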
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);

static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}

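/* First suspend phase: idle the GPU, shut down the display pipeline and save
 * the state needed to bring the device back up again. */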
static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	pci_power_t opregion_target_state;
	int error;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	disable_rpm_wakeref_asserts(dev_priv);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	error = i915_gem_suspend(dev);
	if (error) {
		dev_err(&dev->pdev->dev,
			"GEM idle failed, resume might fail\n");
		goto out;
	}

	intel_guc_suspend(dev);

	intel_suspend_gt_powersave(dev_priv);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev);

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_notify_adapter(dev_priv, opregion_target_state);

	intel_uncore_forcewake_reset(dev_priv, false);
	intel_opregion_unregister(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_display_set_init_power(dev_priv, false);

	intel_csr_ucode_suspend(dev_priv);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return error;
}

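/* Second suspend phase: enter the platform low-power state and power off the
 * PCI device. */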
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	bool fw_csr;
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);

	fw_csr = !IS_BROXTON(dev_priv) &&
		suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
	/*
	 * In case of firmware assisted context save/restore don't manually
	 * deinit the power domains. This also means the CSR/DMC firmware will
	 * stay active, it will power down any HW resources as required and
	 * also enable deeper system power states that would be blocked if the
	 * firmware was inactive.
	 */
	if (!fw_csr)
		intel_power_domains_suspend(dev_priv);

	ret = 0;
	if (IS_BROXTON(dev_priv))
		bxt_enable_dc9(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_enable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);
		if (!fw_csr)
			intel_power_domains_init_hw(dev_priv, true);

		goto out;
	}

	pci_disable_device(drm_dev->pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
		pci_set_power_state(drm_dev->pdev, PCI_D3hot);

	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(dev);
	if (error)
		return error;

	return i915_drm_suspend_late(dev, false);
}

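/* Second resume phase: restore the state saved in i915_drm_suspend() and
 * bring the GPU and display pipeline back up. */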
static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);

	ret = i915_ggtt_enable_hw(dev);
	if (ret)
		DRM_ERROR("failed to re-enable GGTT\n");

	intel_csr_ucode_resume(dev_priv);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);
	mutex_unlock(&dev->struct_mutex);

	i915_restore_state(dev);
	intel_opregion_setup(dev_priv);

	intel_init_pch_refclk(dev);
	drm_mode_config_reset(dev);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
	}
	mutex_unlock(&dev->struct_mutex);

	intel_guc_resume(dev);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev);

	intel_display_resume(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);
	/* Config may have changed between suspend and resume */
	drm_helper_hpd_irq_event(dev);

	intel_opregion_register(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);

	drm_kms_helper_poll_enable(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}

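/* First resume phase: power the PCI device back up and undo the platform
 * low-power state entered in i915_drm_suspend_late(). */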
static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(dev->pdev, PCI_D0);
	if (ret) {
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
		goto out;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(dev->pdev)) {
		ret = -EIO;
		goto out;
	}

	pci_set_master(dev->pdev);

	disable_rpm_wakeref_asserts(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_early_sanitize(dev_priv, true);

	if (IS_BROXTON(dev_priv)) {
		if (!dev_priv->suspended_to_idle)
			gen9_sanitize_dc_state(dev_priv);
		bxt_disable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	}

	intel_uncore_sanitize(dev_priv);

	if (IS_BROXTON(dev_priv) ||
	    !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
		intel_power_domains_init_hw(dev_priv, true);

	enable_rpm_wakeref_asserts(dev_priv);

out:
	dev_priv->suspended_to_idle = false;

	return ret;
}

int i915_resume_switcheroo(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(dev);
	if (ret)
		return ret;

	return i915_drm_resume(dev);
}

/**
 * i915_reset - reset chip after a hang
 * @dev_priv: device private to reset
 *
 * Reset the chip. Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 * - reset the chip using the reset reg
 * - re-init context state
 * - re-init hardware status page
 * - re-init ring buffer
 * - re-init interrupt state
 * - re-init display
 */
int i915_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	unsigned reset_counter;
	int ret;

	intel_reset_gt_powersave(dev_priv);

	mutex_lock(&dev->struct_mutex);

	/* Clear any previous failed attempts at recovery. Time to try again. */
	atomic_andnot(I915_WEDGED, &error->reset_counter);

	/* Clear the reset-in-progress flag and increment the reset epoch. */
	reset_counter = atomic_inc_return(&error->reset_counter);
	if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
		ret = -EIO;
		goto error;
	}

	i915_gem_reset(dev);

	ret = intel_gpu_reset(dev_priv, ALL_ENGINES);

	/* Also reset the gpu hangman. */
	if (error->stop_rings != 0) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		error->stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	if (i915_stop_ring_allow_warn(dev_priv))
		pr_notice("drm/i915: Resetting chip after gpu hang\n");

	if (ret) {
		if (ret != -ENODEV)
			DRM_ERROR("Failed to reset chip: %i\n", ret);
		else
			DRM_DEBUG_DRIVER("GPU reset disabled\n");
		goto error;
	}

	intel_overlay_reset(dev_priv);

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there. Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = i915_gem_init_hw(dev);
	if (ret) {
		DRM_ERROR("Failed hw init on reset %d\n", ret);
		goto error;
	}

	mutex_unlock(&dev->struct_mutex);

	/*
	 * rps/rc6 re-init is necessary to restore state lost after the
	 * reset and the re-install of gt irqs. Skip for ironlake per
	 * previous concerns that it doesn't respond well to some forms
	 * of re-init after reset.
	 */
	if (INTEL_INFO(dev)->gen > 5)
		intel_enable_gt_powersave(dev_priv);

	return 0;

error:
	atomic_or(I915_WEDGED, &error->reset_counter);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

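/* PCI probe entry point: filter out devices we must not bind to before
 * handing over to the common load path. */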
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	if (vga_switcheroo_client_probe_defer(pdev))
		return -EPROBE_DEFER;

	return i915_driver_load(pdev, ent);
}

static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	i915_driver_unload(dev);
	drm_dev_unref(dev);
}

static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(drm_dev);
}

static int i915_pm_suspend_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, false);
}

static int i915_pm_poweroff_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, true);
}

static int i915_pm_resume_early(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(drm_dev);
}

/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *dev)
{
	return i915_pm_suspend(dev);
}

static int i915_pm_freeze_late(struct device *dev)
{
	int ret;

	ret = i915_pm_suspend_late(dev);
	if (ret)
		return ret;

	ret = i915_gem_freeze_late(dev_to_i915(dev));
	if (ret)
		return ret;

	return 0;
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *dev)
{
	return i915_pm_resume_early(dev);
}

static int i915_pm_thaw(struct device *dev)
{
	return i915_pm_resume(dev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *dev)
{
	return i915_pm_resume_early(dev);
}

static int i915_pm_restore(struct device *dev)
{
	return i915_pm_resume(dev);
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *   used internally by the HW in a way that doesn't depend on keeping the
 *   content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode = I915_READ(ARB_MODE);
	s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));

	s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk = I915_READ(GAM_ECOCHK);
	s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl = I915_READ(VLV_G3DCTL);
	s->gsckgctl = I915_READ(VLV_GSCKGCTL);
	s->mbctl = I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
	s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
	s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
	s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
	s->rstctl = I915_READ(GEN6_RSTCTL);
	s->misccpctl = I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause = I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc = I915_READ(GEN6_RPDEUC);
	s->ecobus = I915_READ(ECOBUS);
	s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata = I915_READ(VLV_RCEDATA);
	s->spare2gh = I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr = I915_READ(GTIMR);
	s->gt_ier = I915_READ(GTIER);
	s->pm_imr = I915_READ(GEN6_PMIMR);
	s->pm_ier = I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl = I915_READ(TILECTL);
	s->gt_fifoctl = I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz = I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
	s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
	s->pcbr = I915_READ(VLV_PCBR);
	s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT, 0x9800-0x9EC0
	 * SARB, 0xB000-0xB1FC
	 * GAC, 0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}

2866 | static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) | |
2867 | { | |
2868 | struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; | |
2869 | u32 val; | |
2870 | int i; | |
2871 | ||
2872 | /* GAM 0x4000-0x4770 */ | |
2873 | I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark); | |
2874 | I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl); | |
2875 | I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16)); | |
2876 | I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0); | |
2877 | I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1); | |
2878 | ||
2879 | for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) | |
22dfe79f | 2880 | I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]); |
ddeea5b0 ID |
2881 | |
2882 | I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); | |
b5f1c97f | 2883 | I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count); |
ddeea5b0 ID |
2884 | |
2885 | I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp); | |
2886 | I915_WRITE(GAM_ECOCHK, s->ecochk); | |
2887 | I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp); | |
2888 | I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp); | |
2889 | ||
2890 | I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr); | |
2891 | ||
2892 | /* MBC 0x9024-0x91D0, 0x8500 */ | |
2893 | I915_WRITE(VLV_G3DCTL, s->g3dctl); | |
2894 | I915_WRITE(VLV_GSCKGCTL, s->gsckgctl); | |
2895 | I915_WRITE(GEN6_MBCTL, s->mbctl); | |
2896 | ||
2897 | /* GCP 0x9400-0x9424, 0x8100-0x810C */ | |
2898 | I915_WRITE(GEN6_UCGCTL1, s->ucgctl1); | |
2899 | I915_WRITE(GEN6_UCGCTL3, s->ucgctl3); | |
2900 | I915_WRITE(GEN6_RCGCTL1, s->rcgctl1); | |
2901 | I915_WRITE(GEN6_RCGCTL2, s->rcgctl2); | |
2902 | I915_WRITE(GEN6_RSTCTL, s->rstctl); | |
2903 | I915_WRITE(GEN7_MISCCPCTL, s->misccpctl); | |
2904 | ||
2905 | /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ | |
2906 | I915_WRITE(GEN6_GFXPAUSE, s->gfxpause); | |
2907 | I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc); | |
2908 | I915_WRITE(GEN6_RPDEUC, s->rpdeuc); | |
2909 | I915_WRITE(ECOBUS, s->ecobus); | |
2910 | I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl); | |
2911 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout); | |
2912 | I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw); | |
2913 | I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr); | |
2914 | I915_WRITE(VLV_RCEDATA, s->rcedata); | |
2915 | I915_WRITE(VLV_SPAREG2H, s->spare2gh); | |
2916 | ||
2917 | /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ | |
2918 | I915_WRITE(GTIMR, s->gt_imr); | |
2919 | I915_WRITE(GTIER, s->gt_ier); | |
2920 | I915_WRITE(GEN6_PMIMR, s->pm_imr); | |
2921 | I915_WRITE(GEN6_PMIER, s->pm_ier); | |
2922 | ||
2923 | for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) | |
22dfe79f | 2924 | I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]); |
ddeea5b0 ID |
2925 | |
2926 | /* GT SA CZ domain, 0x100000-0x138124 */ | |
2927 | I915_WRITE(TILECTL, s->tilectl); | |
2928 | I915_WRITE(GTFIFOCTL, s->gt_fifoctl); | |
2929 | /* | |
2930 | * Preserve the GT allow wake and GFX force clock bits; they are | |
2931 | * not restored here, as they are used by the caller to control the | |
2932 | * s0ix suspend/resume sequence. | |
2933 | */ | |
2934 | val = I915_READ(VLV_GTLC_WAKE_CTRL); | |
2935 | val &= VLV_GTLC_ALLOWWAKEREQ; | |
2936 | val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ; | |
2937 | I915_WRITE(VLV_GTLC_WAKE_CTRL, val); | |
2938 | ||
2939 | val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); | |
2940 | val &= VLV_GFX_CLK_FORCE_ON_BIT; | |
2941 | val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT; | |
2942 | I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); | |
2943 | ||
2944 | I915_WRITE(VLV_PMWGICZ, s->pmwgicz); | |
2945 | ||
2946 | /* Gunit-Display CZ domain, 0x182028-0x1821CF */ | |
2947 | I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); | |
2948 | I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); | |
9c25210f | 2949 | I915_WRITE(VLV_PCBR, s->pcbr); |
ddeea5b0 ID |
2950 | I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); |
2951 | } | |
2952 | ||
650ad970 ID |
2953 | int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) |
2954 | { | |
2955 | u32 val; | |
2956 | int err; | |
2957 | ||
650ad970 | 2958 | #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT) |
650ad970 ID |
2959 | |
2960 | val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); | |
2961 | val &= ~VLV_GFX_CLK_FORCE_ON_BIT; | |
2962 | if (force_on) | |
2963 | val |= VLV_GFX_CLK_FORCE_ON_BIT; | |
2964 | I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); | |
2965 | ||
2966 | if (!force_on) | |
2967 | return 0; | |
2968 | ||
8d4eee9c | 2969 | err = wait_for(COND, 20); |
650ad970 ID |
2970 | if (err) |
2971 | DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n", | |
2972 | I915_READ(VLV_GTLC_SURVIVABILITY_REG)); | |
2973 | ||
2974 | return err; | |
2975 | #undef COND | |
2976 | } | |
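/*
 * Editorial sketch: the intended usage pattern for vlv_force_gfx_clock()
 * is to bracket Gunit register access while the GT might otherwise be
 * clock gated, exactly as vlv_suspend_complete()/vlv_resume_prepare()
 * below do. The function name and the elided register access are
 * illustrative assumptions, not driver code.
 */
static int vlv_gunit_access_example(struct drm_i915_private *dev_priv)
{
	int err;

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		return err;

	/* ... read/write Gunit registers safely here ... */

	return vlv_force_gfx_clock(dev_priv, false);
}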
2977 | ||
ddeea5b0 ID |
2978 | static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) |
2979 | { | |
2980 | u32 val; | |
2981 | int err = 0; | |
2982 | ||
2983 | val = I915_READ(VLV_GTLC_WAKE_CTRL); | |
2984 | val &= ~VLV_GTLC_ALLOWWAKEREQ; | |
2985 | if (allow) | |
2986 | val |= VLV_GTLC_ALLOWWAKEREQ; | |
2987 | I915_WRITE(VLV_GTLC_WAKE_CTRL, val); | |
2988 | POSTING_READ(VLV_GTLC_WAKE_CTRL); | |
2989 | ||
2990 | #define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \ | |
2991 | allow) | |
2992 | err = wait_for(COND, 1); | |
2993 | if (err) | |
2994 | DRM_ERROR("timeout disabling GT waking\n"); | |
2995 | return err; | |
2996 | #undef COND | |
2997 | } | |
2998 | ||
2999 | static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, | |
3000 | bool wait_for_on) | |
3001 | { | |
3002 | u32 mask; | |
3003 | u32 val; | |
3004 | int err; | |
3005 | ||
3006 | mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; | |
3007 | val = wait_for_on ? mask : 0; | |
3008 | #define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val) | |
3009 | if (COND) | |
3010 | return 0; | |
3011 | ||
3012 | DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n", | |
87ad3212 JN |
3013 | onoff(wait_for_on), |
3014 | I915_READ(VLV_GTLC_PW_STATUS)); | |
ddeea5b0 ID |
3015 | |
3016 | /* | |
3017 | * RC6 transitioning can be delayed up to 2 msec (see | |
3018 | * valleyview_enable_rps), use 3 msec for safety. | |
3019 | */ | |
3020 | err = wait_for(COND, 3); | |
3021 | if (err) | |
3022 | DRM_ERROR("timeout waiting for GT wells to go %s\n", | |
87ad3212 | 3023 | onoff(wait_for_on)); |
ddeea5b0 ID |
3024 | |
3025 | return err; | |
3026 | #undef COND | |
3027 | } | |
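/*
 * Editorial note: the local COND/#undef idiom above feeds the driver's
 * wait_for() polling macro, which re-evaluates the condition until it
 * holds or the timeout in milliseconds expires. A minimal sketch of the
 * same idiom, assuming wait_for() returns 0 on success and a negative
 * error code on timeout; the function name is made up:
 */
static int vlv_wait_for_bit_example(struct drm_i915_private *dev_priv,
				    i915_reg_t reg, u32 bit)
{
#define COND (I915_READ(reg) & bit)
	return wait_for(COND, 3); /* poll for up to 3 ms */
#undef COND
}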
3028 | ||
3029 | static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) | |
3030 | { | |
3031 | if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR)) | |
3032 | return; | |
3033 | ||
6fa283b0 | 3034 | DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n"); |
ddeea5b0 ID |
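/*
 * The error flag is sticky; writing the set bit back clears it
 * (write-one-to-clear, by all appearances).
 */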
3035 | I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); |
3036 | } | |
3037 | ||
ebc32824 | 3038 | static int vlv_suspend_complete(struct drm_i915_private *dev_priv) |
ddeea5b0 ID |
3039 | { |
3040 | u32 mask; | |
3041 | int err; | |
3042 | ||
3043 | /* | |
3044 | * Bspec defines the following GT well-on flags as debug only, so | |
3045 | * don't treat them as hard failures. | |
3046 | */ | |
3047 | (void)vlv_wait_for_gt_wells(dev_priv, false); | |
3048 | ||
3049 | mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS; | |
3050 | WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask); | |
3051 | ||
3052 | vlv_check_no_gt_access(dev_priv); | |
3053 | ||
3054 | err = vlv_force_gfx_clock(dev_priv, true); | |
3055 | if (err) | |
3056 | goto err1; | |
3057 | ||
3058 | err = vlv_allow_gt_wake(dev_priv, false); | |
3059 | if (err) | |
3060 | goto err2; | |
98711167 | 3061 | |
2d1fe073 | 3062 | if (!IS_CHERRYVIEW(dev_priv)) |
98711167 | 3063 | vlv_save_gunit_s0ix_state(dev_priv); |
ddeea5b0 ID |
3064 | |
3065 | err = vlv_force_gfx_clock(dev_priv, false); | |
3066 | if (err) | |
3067 | goto err2; | |
3068 | ||
3069 | return 0; | |
3070 | ||
3071 | err2: | |
3072 | /* For safety, always re-enable waking and disable gfx clock forcing */ | |
3073 | vlv_allow_gt_wake(dev_priv, true); | |
3074 | err1: | |
3075 | vlv_force_gfx_clock(dev_priv, false); | |
3076 | ||
3077 | return err; | |
3078 | } | |
3079 | ||
016970be SK |
3080 | static int vlv_resume_prepare(struct drm_i915_private *dev_priv, |
3081 | bool rpm_resume) | |
ddeea5b0 ID |
3082 | { |
3083 | struct drm_device *dev = dev_priv->dev; | |
3084 | int err; | |
3085 | int ret; | |
3086 | ||
3087 | /* | |
3088 | * If any of the steps fail, just try to continue; that's the best we | |
3089 | * can do at this point. Return the first error code (which will also | |
3090 | * leave RPM permanently disabled). | |
3091 | */ | |
3092 | ret = vlv_force_gfx_clock(dev_priv, true); | |
3093 | ||
2d1fe073 | 3094 | if (!IS_CHERRYVIEW(dev_priv)) |
98711167 | 3095 | vlv_restore_gunit_s0ix_state(dev_priv); |
ddeea5b0 ID |
3096 | |
3097 | err = vlv_allow_gt_wake(dev_priv, true); | |
3098 | if (!ret) | |
3099 | ret = err; | |
3100 | ||
3101 | err = vlv_force_gfx_clock(dev_priv, false); | |
3102 | if (!ret) | |
3103 | ret = err; | |
3104 | ||
3105 | vlv_check_no_gt_access(dev_priv); | |
3106 | ||
016970be SK |
3107 | if (rpm_resume) { |
3108 | intel_init_clock_gating(dev); | |
3109 | i915_gem_restore_fences(dev); | |
3110 | } | |
ddeea5b0 ID |
3111 | |
3112 | return ret; | |
3113 | } | |
3114 | ||
97bea207 | 3115 | static int intel_runtime_suspend(struct device *device) |
8a187455 PZ |
3116 | { |
3117 | struct pci_dev *pdev = to_pci_dev(device); | |
3118 | struct drm_device *dev = pci_get_drvdata(pdev); | |
3119 | struct drm_i915_private *dev_priv = dev->dev_private; | |
0ab9cfeb | 3120 | int ret; |
8a187455 | 3121 | |
dc97997a | 3122 | if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6()))) |
c6df39b5 ID |
3123 | return -ENODEV; |
3124 | ||
604effb7 ID |
3125 | if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) |
3126 | return -ENODEV; | |
3127 | ||
8a187455 PZ |
3128 | DRM_DEBUG_KMS("Suspending device\n"); |
3129 | ||
d6102977 ID |
3130 | /* |
3131 | * We could deadlock here in case another thread holding struct_mutex | |
3132 | * calls RPM suspend concurrently, since the RPM suspend will wait | |
3133 | * first for this RPM suspend to finish. In this case the concurrent | |
3134 | * RPM resume will be followed by its RPM suspend counterpart. Still, | |
3135 | * for consistency, return -EAGAIN, which will reschedule this suspend. | |
3136 | */ | |
3137 | if (!mutex_trylock(&dev->struct_mutex)) { | |
3138 | DRM_DEBUG_KMS("device lock contention, deferring suspend\n"); | |
3139 | /* | |
3140 | * Bump the expiration timestamp, otherwise the suspend won't | |
3141 | * be rescheduled. | |
3142 | */ | |
3143 | pm_runtime_mark_last_busy(device); | |
3144 | ||
3145 | return -EAGAIN; | |
3146 | } | |
1f814dac ID |
3147 | |
3148 | disable_rpm_wakeref_asserts(dev_priv); | |
3149 | ||
d6102977 ID |
3150 | /* |
3151 | * We are safe here against re-faults, since the fault handler takes | |
3152 | * an RPM reference. | |
3153 | */ | |
3154 | i915_gem_release_all_mmaps(dev_priv); | |
3155 | mutex_unlock(&dev->struct_mutex); | |
3156 | ||
825f2728 JL |
3157 | cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); |
3158 | ||
a1c41994 AD |
3159 | intel_guc_suspend(dev); |
3160 | ||
dc97997a | 3161 | intel_suspend_gt_powersave(dev_priv); |
2eb5252e | 3162 | intel_runtime_pm_disable_interrupts(dev_priv); |
b5478bcd | 3163 | |
507e126e ID |
3164 | ret = 0; |
3165 | if (IS_BROXTON(dev_priv)) { | |
3166 | bxt_display_core_uninit(dev_priv); | |
3167 | bxt_enable_dc9(dev_priv); | |
3168 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { | |
3169 | hsw_enable_pc8(dev_priv); | |
3170 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | |
3171 | ret = vlv_suspend_complete(dev_priv); | |
3172 | } | |
3173 | ||
0ab9cfeb ID |
3174 | if (ret) { |
3175 | DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); | |
b963291c | 3176 | intel_runtime_pm_enable_interrupts(dev_priv); |
0ab9cfeb | 3177 | |
1f814dac ID |
3178 | enable_rpm_wakeref_asserts(dev_priv); |
3179 | ||
0ab9cfeb ID |
3180 | return ret; |
3181 | } | |
a8a8bd54 | 3182 | |
dc97997a | 3183 | intel_uncore_forcewake_reset(dev_priv, false); |
1f814dac ID |
3184 | |
3185 | enable_rpm_wakeref_asserts(dev_priv); | |
3186 | WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); | |
55ec45c2 | 3187 | |
bc3b9346 | 3188 | if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv)) |
55ec45c2 MK |
3189 | DRM_ERROR("Unclaimed access detected prior to suspending\n"); |
3190 | ||
8a187455 | 3191 | dev_priv->pm.suspended = true; |
1fb2362b KCA |
3192 | |
3193 | /* | |
c8a0bd42 PZ |
3194 | * FIXME: We really should find a document that references the arguments |
3195 | * used below! | |
1fb2362b | 3196 | */ |
6f9f4b7a | 3197 | if (IS_BROADWELL(dev_priv)) { |
d37ae19a PZ |
3198 | /* |
3199 | * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop | |
3200 | * being detected, and the call we do at intel_runtime_resume() | |
3201 | * won't be able to restore them. Since PCI_D3hot matches the | |
3202 | * actual specification and appears to be working, use it. | |
3203 | */ | |
6f9f4b7a | 3204 | intel_opregion_notify_adapter(dev_priv, PCI_D3hot); |
d37ae19a | 3205 | } else { |
c8a0bd42 PZ |
3206 | /* |
3207 | * Current versions of firmware that depend on this opregion | |
3208 | * notification have repurposed the D1 definition to mean | |
3209 | * "runtime suspended" vs. what you would normally expect (D3) | |
3210 | * to distinguish it from notifications that might be sent via | |
3211 | * the suspend path. | |
3212 | */ | |
6f9f4b7a | 3213 | intel_opregion_notify_adapter(dev_priv, PCI_D1); |
c8a0bd42 | 3214 | } |
8a187455 | 3215 | |
59bad947 | 3216 | assert_forcewakes_inactive(dev_priv); |
dc9fb09c | 3217 | |
a8a8bd54 | 3218 | DRM_DEBUG_KMS("Device suspended\n"); |
8a187455 PZ |
3219 | return 0; |
3220 | } | |
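/*
 * Editorial sketch: hardware access elsewhere in the driver holds a
 * runtime PM reference so this suspend handler cannot run in parallel
 * with it. intel_runtime_pm_get()/intel_runtime_pm_put() are assumed to
 * be the wakeref helpers declared elsewhere in i915; the register read
 * in the middle is purely illustrative.
 */
static u32 read_with_wakeref_example(struct drm_i915_private *dev_priv)
{
	u32 val;

	intel_runtime_pm_get(dev_priv);
	val = I915_READ(ECOBUS); /* any hardware access */
	intel_runtime_pm_put(dev_priv);

	return val;
}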
3221 | ||
97bea207 | 3222 | static int intel_runtime_resume(struct device *device) |
8a187455 PZ |
3223 | { |
3224 | struct pci_dev *pdev = to_pci_dev(device); | |
3225 | struct drm_device *dev = pci_get_drvdata(pdev); | |
3226 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1a5df187 | 3227 | int ret = 0; |
8a187455 | 3228 | |
604effb7 ID |
3229 | if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) |
3230 | return -ENODEV; | |
8a187455 PZ |
3231 | |
3232 | DRM_DEBUG_KMS("Resuming device\n"); | |
3233 | ||
1f814dac ID |
3234 | WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); |
3235 | disable_rpm_wakeref_asserts(dev_priv); | |
3236 | ||
6f9f4b7a | 3237 | intel_opregion_notify_adapter(dev_priv, PCI_D0); |
8a187455 | 3238 | dev_priv->pm.suspended = false; |
55ec45c2 MK |
3239 | if (intel_uncore_unclaimed_mmio(dev_priv)) |
3240 | DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); | |
8a187455 | 3241 | |
a1c41994 AD |
3242 | intel_guc_resume(dev); |
3243 | ||
1a5df187 PZ |
3244 | if (IS_GEN6(dev_priv)) |
3245 | intel_init_pch_refclk(dev); | |
31335cec | 3246 | |
507e126e ID |
3247 | if (IS_BROXTON(dev)) { |
3248 | bxt_disable_dc9(dev_priv); | |
3249 | bxt_display_core_init(dev_priv, true); | |
f62c79b3 ID |
3250 | if (dev_priv->csr.dmc_payload && |
3251 | (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) | |
3252 | gen9_enable_dc5(dev_priv); | |
507e126e | 3253 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
1a5df187 | 3254 | hsw_disable_pc8(dev_priv); |
507e126e | 3255 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
1a5df187 | 3256 | ret = vlv_resume_prepare(dev_priv, true); |
507e126e | 3257 | } |
1a5df187 | 3258 | |
0ab9cfeb ID |
3259 | /* |
3260 | * No point in rolling things back in case of an error; the best | |
3261 | * we can do is hope that things will still work (and disable RPM). | |
3262 | */ | |
92b806d3 | 3263 | i915_gem_init_swizzling(dev); |
dc97997a | 3264 | gen6_update_ring_freq(dev_priv); |
92b806d3 | 3265 | |
b963291c | 3266 | intel_runtime_pm_enable_interrupts(dev_priv); |
08d8a232 VS |
3267 | |
3268 | /* | |
3269 | * On VLV/CHV display interrupts are part of the display | |
3270 | * power well, so hpd is reinitialized from there. For | |
3271 | * everyone else do it here. | |
3272 | */ | |
666a4537 | 3273 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
08d8a232 VS |
3274 | intel_hpd_init(dev_priv); |
3275 | ||
dc97997a | 3276 | intel_enable_gt_powersave(dev_priv); |
b5478bcd | 3277 | |
1f814dac ID |
3278 | enable_rpm_wakeref_asserts(dev_priv); |
3279 | ||
0ab9cfeb ID |
3280 | if (ret) |
3281 | DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); | |
3282 | else | |
3283 | DRM_DEBUG_KMS("Device resumed\n"); | |
3284 | ||
3285 | return ret; | |
8a187455 PZ |
3286 | } |
3287 | ||
b4b78d12 | 3288 | static const struct dev_pm_ops i915_pm_ops = { |
5545dbbf ID |
3289 | /* |
3290 | * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, | |
3291 | * PMSG_RESUME] | |
3292 | */ | |
0206e353 | 3293 | .suspend = i915_pm_suspend, |
76c4b250 ID |
3294 | .suspend_late = i915_pm_suspend_late, |
3295 | .resume_early = i915_pm_resume_early, | |
0206e353 | 3296 | .resume = i915_pm_resume, |
5545dbbf ID |
3297 | |
3298 | /* | |
3299 | * S4 event handlers | |
3300 | * @freeze, @freeze_late : called (1) before creating the | |
3301 | * hibernation image [PMSG_FREEZE] and | |
3302 | * (2) after rebooting, before restoring | |
3303 | * the image [PMSG_QUIESCE] | |
3304 | * @thaw, @thaw_early : called (1) after creating the hibernation | |
3305 | * image, before writing it [PMSG_THAW] | |
3306 | * and (2) after failing to create or | |
3307 | * restore the image [PMSG_RECOVER] | |
3308 | * @poweroff, @poweroff_late: called after writing the hibernation | |
3309 | * image, before rebooting [PMSG_HIBERNATE] | |
3310 | * @restore, @restore_early : called after rebooting and restoring the | |
3311 | * hibernation image [PMSG_RESTORE] | |
3312 | */ | |
1f19ac2a CW |
3313 | .freeze = i915_pm_freeze, |
3314 | .freeze_late = i915_pm_freeze_late, | |
3315 | .thaw_early = i915_pm_thaw_early, | |
3316 | .thaw = i915_pm_thaw, | |
36d61e67 | 3317 | .poweroff = i915_pm_suspend, |
ab3be73f | 3318 | .poweroff_late = i915_pm_poweroff_late, |
1f19ac2a CW |
3319 | .restore_early = i915_pm_restore_early, |
3320 | .restore = i915_pm_restore, | |
5545dbbf ID |
3321 | |
3322 | /* S0ix (via runtime suspend) event handlers */ | |
97bea207 PZ |
3323 | .runtime_suspend = intel_runtime_suspend, |
3324 | .runtime_resume = intel_runtime_resume, | |
cbda12d7 ZW |
3325 | }; |
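/*
 * Editorial sketch: the generic way a driver arms runtime PM so that
 * .runtime_suspend above fires after a period of idleness. The 10 s
 * delay is an arbitrary assumption; i915's real arming happens outside
 * this excerpt.
 */
static void arm_autosuspend_example(struct device *kdev)
{
	pm_runtime_set_autosuspend_delay(kdev, 10000); /* ms, arbitrary */
	pm_runtime_use_autosuspend(kdev);
	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}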
3326 | ||
78b68556 | 3327 | static const struct vm_operations_struct i915_gem_vm_ops = { |
de151cf6 | 3328 | .fault = i915_gem_fault, |
ab00b3e5 JB |
3329 | .open = drm_gem_vm_open, |
3330 | .close = drm_gem_vm_close, | |
de151cf6 JB |
3331 | }; |
3332 | ||
e08e96de AV |
3333 | static const struct file_operations i915_driver_fops = { |
3334 | .owner = THIS_MODULE, | |
3335 | .open = drm_open, | |
3336 | .release = drm_release, | |
3337 | .unlocked_ioctl = drm_ioctl, | |
3338 | .mmap = drm_gem_mmap, | |
3339 | .poll = drm_poll, | |
e08e96de AV |
3340 | .read = drm_read, |
3341 | #ifdef CONFIG_COMPAT | |
3342 | .compat_ioctl = i915_compat_ioctl, | |
3343 | #endif | |
3344 | .llseek = noop_llseek, | |
3345 | }; | |
3346 | ||
0673ad47 CW |
3347 | static int |
3348 | i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data, | |
3349 | struct drm_file *file) | |
3350 | { | |
3351 | return -ENODEV; | |
3352 | } | |
3353 | ||
3354 | static const struct drm_ioctl_desc i915_ioctls[] = { | |
3355 | DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
3356 | DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH), | |
3357 | DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH), | |
3358 | DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), | |
3359 | DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), | |
3360 | DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), | |
3361 | DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), | |
3362 | DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
3363 | DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), | |
3364 | DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), | |
3365 | DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
3366 | DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), | |
3367 | DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
3368 | DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
3369 | DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), | |
3370 | DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), | |
3371 | DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
3372 | DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
3373 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), | |
3374 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW), | |
3375 | DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), | |
3376 | DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), | |
3377 | DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), | |
3378 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW), | |
3379 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW), | |
3380 | DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), | |
3381 | DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
3382 | DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
3383 | DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW), | |
3384 | DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), | |
3385 | DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), | |
3386 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), | |
3387 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW), | |
3388 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), | |
3389 | DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW), | |
3390 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW), | |
3391 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW), | |
3392 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), | |
3393 | DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), | |
3394 | DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), | |
3395 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), | |
3396 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), | |
3397 | DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), | |
3398 | DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), | |
3399 | DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), | |
3400 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), | |
3401 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), | |
3402 | DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), | |
3403 | DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), | |
3404 | DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), | |
3405 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), | |
3406 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), | |
3407 | }; | |
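#if 0	/* Editorial sketch, not driver code: the userspace side of the
	 * table above. A libdrm client issues the matching DRM_IOCTL_*
	 * and the DRM core dispatches to the handler listed here (e.g.
	 * i915_getparam for I915_GETPARAM). Error handling is elided and
	 * header paths may vary by distribution.
	 */
#include <xf86drm.h>
#include <i915_drm.h>

static int get_chipset_id_example(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_CHIPSET_ID,
		.value = &value,
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;

	return value;
}
#endif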
3408 | ||
1da177e4 | 3409 | static struct drm_driver driver = { |
0c54781b MW |
3410 | /* Don't use MTRRs here; the Xserver or userspace app should |
3411 | * deal with them for Intel hardware. | |
792d2b9a | 3412 | */ |
673a394b | 3413 | .driver_features = |
10ba5012 | 3414 | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME | |
1751fcf9 | 3415 | DRIVER_RENDER | DRIVER_MODESET, |
673a394b | 3416 | .open = i915_driver_open, |
22eae947 DA |
3417 | .lastclose = i915_driver_lastclose, |
3418 | .preclose = i915_driver_preclose, | |
673a394b | 3419 | .postclose = i915_driver_postclose, |
915b4d11 | 3420 | .set_busid = drm_pci_set_busid, |
d8e29209 | 3421 | |
673a394b | 3422 | .gem_free_object = i915_gem_free_object, |
de151cf6 | 3423 | .gem_vm_ops = &i915_gem_vm_ops, |
1286ff73 DV |
3424 | |
3425 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | |
3426 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | |
3427 | .gem_prime_export = i915_gem_prime_export, | |
3428 | .gem_prime_import = i915_gem_prime_import, | |
3429 | ||
ff72145b | 3430 | .dumb_create = i915_gem_dumb_create, |
da6b51d0 | 3431 | .dumb_map_offset = i915_gem_mmap_gtt, |
43387b37 | 3432 | .dumb_destroy = drm_gem_dumb_destroy, |
1da177e4 | 3433 | .ioctls = i915_ioctls, |
0673ad47 | 3434 | .num_ioctls = ARRAY_SIZE(i915_ioctls), |
e08e96de | 3435 | .fops = &i915_driver_fops, |
22eae947 DA |
3436 | .name = DRIVER_NAME, |
3437 | .desc = DRIVER_DESC, | |
3438 | .date = DRIVER_DATE, | |
3439 | .major = DRIVER_MAJOR, | |
3440 | .minor = DRIVER_MINOR, | |
3441 | .patchlevel = DRIVER_PATCHLEVEL, | |
1da177e4 LT |
3442 | }; |
3443 | ||
8410ea3b DA |
3444 | static struct pci_driver i915_pci_driver = { |
3445 | .name = DRIVER_NAME, | |
3446 | .id_table = pciidlist, | |
3447 | .probe = i915_pci_probe, | |
3448 | .remove = i915_pci_remove, | |
3449 | .driver.pm = &i915_pm_ops, | |
3450 | }; | |
3451 | ||
1da177e4 LT |
3452 | static int __init i915_init(void) |
3453 | { | |
79e53945 | 3454 | /* |
fd930478 CW |
3455 | * Enable KMS by default, unless explicitly overridden by | |
3456 | * either the i915.modeset parameter or the | |
3457 | * vga_text_mode_force boot option. | |
79e53945 | 3458 | */ |
fd930478 CW |
3459 | |
3460 | if (i915.modeset == 0) | |
3461 | driver.driver_features &= ~DRIVER_MODESET; | |
79e53945 | 3462 | |
d330a953 | 3463 | if (vgacon_text_force() && i915.modeset == -1) |
79e53945 | 3464 | driver.driver_features &= ~DRIVER_MODESET; |
79e53945 | 3465 | |
b30324ad | 3466 | if (!(driver.driver_features & DRIVER_MODESET)) { |
b30324ad | 3467 | /* Silently fail loading to not upset userspace. */ |
c2dac868 | 3468 | DRM_DEBUG_DRIVER("KMS disabled.\n"); |
b30324ad | 3469 | return 0; |
b30324ad | 3470 | } |
3885c6bb | 3471 | |
c5b852f3 | 3472 | if (i915.nuclear_pageflip) |
b2e7723b MR |
3473 | driver.driver_features |= DRIVER_ATOMIC; |
3474 | ||
8410ea3b | 3475 | return drm_pci_init(&driver, &i915_pci_driver); |
1da177e4 LT |
3476 | } |
3477 | ||
3478 | static void __exit i915_exit(void) | |
3479 | { | |
b33ecdd1 DV |
3480 | if (!(driver.driver_features & DRIVER_MODESET)) |
3481 | return; /* Never loaded a driver. */ | |
b33ecdd1 | 3482 | |
8410ea3b | 3483 | drm_pci_exit(&driver, &i915_pci_driver); |
1da177e4 LT |
3484 | } |
3485 | ||
3486 | module_init(i915_init); | |
3487 | module_exit(i915_exit); | |
3488 | ||
0a6d1631 | 3489 | MODULE_AUTHOR("Tungsten Graphics, Inc."); |
1eab9234 | 3490 | MODULE_AUTHOR("Intel Corporation"); |
0a6d1631 | 3491 | |
b5e89ed5 | 3492 | MODULE_DESCRIPTION(DRIVER_DESC); |
1da177e4 | 3493 | MODULE_LICENSE("GPL and additional rights"); |