git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/gpu/drm/i915/i915_drv.c
drm/i915/kbl: Add Kabylake PCI ID
1da177e4
LT
1/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2 */
0d6aa60b 3/*
bc54fd1a 4 *
1da177e4
LT
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved.
bc54fd1a
DA
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 *
0d6aa60b 28 */
1da177e4 29
5669fcac 30#include <linux/device.h>
e5747e3a 31#include <linux/acpi.h>
760285e7
DH
32#include <drm/drmP.h>
33#include <drm/i915_drm.h>
1da177e4 34#include "i915_drv.h"
990bbdad 35#include "i915_trace.h"
f49f0586 36#include "intel_drv.h"
1da177e4 37
79e53945 38#include <linux/console.h>
e0cd3608 39#include <linux/module.h>
d6102977 40#include <linux/pm_runtime.h>
760285e7 41#include <drm/drm_crtc_helper.h>
79e53945 42
112b715e
KH
43static struct drm_driver driver;
44
a57c774a
AK
45#define GEN_DEFAULT_PIPEOFFSETS \
46 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
47 PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
48 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
49 TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
a57c774a
AK
50 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
51
84fd4f4e
RB
52#define GEN_CHV_PIPEOFFSETS \
53 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
54 CHV_PIPE_C_OFFSET }, \
55 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
56 CHV_TRANSCODER_C_OFFSET, }, \
84fd4f4e
RB
57 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
58 CHV_PALETTE_C_OFFSET }
a57c774a 59
5efb3e28
VS
60#define CURSOR_OFFSETS \
61 .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
62
63#define IVB_CURSOR_OFFSETS \
64 .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
65
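/*
 * Annotation added by the editor (not part of the upstream file): each
 * intel_device_info structure below is a static, per-platform description
 * of hardware capabilities (generation, pipe count, ring mask and feature
 * flags). The PCI ID table further down binds every supported device ID
 * to one of these structures, and the rest of the driver reads them back
 * through INTEL_INFO(dev).
 */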
9a7e8492 66static const struct intel_device_info intel_i830_info = {
7eb552ae 67 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
31578148 68 .has_overlay = 1, .overlay_needs_physical = 1,
73ae478c 69 .ring_mask = RENDER_RING,
a57c774a 70 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 71 CURSOR_OFFSETS,
cfdf1fa2
KH
72};
73
9a7e8492 74static const struct intel_device_info intel_845g_info = {
7eb552ae 75 .gen = 2, .num_pipes = 1,
31578148 76 .has_overlay = 1, .overlay_needs_physical = 1,
73ae478c 77 .ring_mask = RENDER_RING,
a57c774a 78 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 79 CURSOR_OFFSETS,
cfdf1fa2
KH
80};
81
9a7e8492 82static const struct intel_device_info intel_i85x_info = {
7eb552ae 83 .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
5ce8ba7c 84 .cursor_needs_physical = 1,
31578148 85 .has_overlay = 1, .overlay_needs_physical = 1,
fd70d52a 86 .has_fbc = 1,
73ae478c 87 .ring_mask = RENDER_RING,
a57c774a 88 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 89 CURSOR_OFFSETS,
cfdf1fa2
KH
90};
91
9a7e8492 92static const struct intel_device_info intel_i865g_info = {
7eb552ae 93 .gen = 2, .num_pipes = 1,
31578148 94 .has_overlay = 1, .overlay_needs_physical = 1,
73ae478c 95 .ring_mask = RENDER_RING,
a57c774a 96 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 97 CURSOR_OFFSETS,
cfdf1fa2
KH
98};
99
9a7e8492 100static const struct intel_device_info intel_i915g_info = {
7eb552ae 101 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
31578148 102 .has_overlay = 1, .overlay_needs_physical = 1,
73ae478c 103 .ring_mask = RENDER_RING,
a57c774a 104 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 105 CURSOR_OFFSETS,
cfdf1fa2 106};
9a7e8492 107static const struct intel_device_info intel_i915gm_info = {
7eb552ae 108 .gen = 3, .is_mobile = 1, .num_pipes = 2,
b295d1b6 109 .cursor_needs_physical = 1,
31578148 110 .has_overlay = 1, .overlay_needs_physical = 1,
a6c45cf0 111 .supports_tv = 1,
fd70d52a 112 .has_fbc = 1,
73ae478c 113 .ring_mask = RENDER_RING,
a57c774a 114 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 115 CURSOR_OFFSETS,
cfdf1fa2 116};
9a7e8492 117static const struct intel_device_info intel_i945g_info = {
7eb552ae 118 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
31578148 119 .has_overlay = 1, .overlay_needs_physical = 1,
73ae478c 120 .ring_mask = RENDER_RING,
a57c774a 121 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 122 CURSOR_OFFSETS,
cfdf1fa2 123};
9a7e8492 124static const struct intel_device_info intel_i945gm_info = {
7eb552ae 125 .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
b295d1b6 126 .has_hotplug = 1, .cursor_needs_physical = 1,
31578148 127 .has_overlay = 1, .overlay_needs_physical = 1,
a6c45cf0 128 .supports_tv = 1,
fd70d52a 129 .has_fbc = 1,
73ae478c 130 .ring_mask = RENDER_RING,
a57c774a 131 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 132 CURSOR_OFFSETS,
cfdf1fa2
KH
133};
134
9a7e8492 135static const struct intel_device_info intel_i965g_info = {
7eb552ae 136 .gen = 4, .is_broadwater = 1, .num_pipes = 2,
c96c3a8c 137 .has_hotplug = 1,
31578148 138 .has_overlay = 1,
73ae478c 139 .ring_mask = RENDER_RING,
a57c774a 140 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 141 CURSOR_OFFSETS,
cfdf1fa2
KH
142};
143
9a7e8492 144static const struct intel_device_info intel_i965gm_info = {
7eb552ae 145 .gen = 4, .is_crestline = 1, .num_pipes = 2,
e3c4e5dd 146 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
31578148 147 .has_overlay = 1,
a6c45cf0 148 .supports_tv = 1,
73ae478c 149 .ring_mask = RENDER_RING,
a57c774a 150 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 151 CURSOR_OFFSETS,
cfdf1fa2
KH
152};
153
9a7e8492 154static const struct intel_device_info intel_g33_info = {
7eb552ae 155 .gen = 3, .is_g33 = 1, .num_pipes = 2,
c96c3a8c 156 .need_gfx_hws = 1, .has_hotplug = 1,
31578148 157 .has_overlay = 1,
73ae478c 158 .ring_mask = RENDER_RING,
a57c774a 159 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 160 CURSOR_OFFSETS,
cfdf1fa2
KH
161};
162
9a7e8492 163static const struct intel_device_info intel_g45_info = {
7eb552ae 164 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
c96c3a8c 165 .has_pipe_cxsr = 1, .has_hotplug = 1,
73ae478c 166 .ring_mask = RENDER_RING | BSD_RING,
a57c774a 167 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 168 CURSOR_OFFSETS,
cfdf1fa2
KH
169};
170
9a7e8492 171static const struct intel_device_info intel_gm45_info = {
7eb552ae 172 .gen = 4, .is_g4x = 1, .num_pipes = 2,
e3c4e5dd 173 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
c96c3a8c 174 .has_pipe_cxsr = 1, .has_hotplug = 1,
a6c45cf0 175 .supports_tv = 1,
73ae478c 176 .ring_mask = RENDER_RING | BSD_RING,
a57c774a 177 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 178 CURSOR_OFFSETS,
cfdf1fa2
KH
179};
180
9a7e8492 181static const struct intel_device_info intel_pineview_info = {
7eb552ae 182 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
c96c3a8c 183 .need_gfx_hws = 1, .has_hotplug = 1,
31578148 184 .has_overlay = 1,
a57c774a 185 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 186 CURSOR_OFFSETS,
cfdf1fa2
KH
187};
188
9a7e8492 189static const struct intel_device_info intel_ironlake_d_info = {
7eb552ae 190 .gen = 5, .num_pipes = 2,
5a117db7 191 .need_gfx_hws = 1, .has_hotplug = 1,
73ae478c 192 .ring_mask = RENDER_RING | BSD_RING,
a57c774a 193 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 194 CURSOR_OFFSETS,
cfdf1fa2
KH
195};
196
9a7e8492 197static const struct intel_device_info intel_ironlake_m_info = {
7eb552ae 198 .gen = 5, .is_mobile = 1, .num_pipes = 2,
e3c4e5dd 199 .need_gfx_hws = 1, .has_hotplug = 1,
c1a9f047 200 .has_fbc = 1,
73ae478c 201 .ring_mask = RENDER_RING | BSD_RING,
a57c774a 202 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 203 CURSOR_OFFSETS,
cfdf1fa2
KH
204};
205
9a7e8492 206static const struct intel_device_info intel_sandybridge_d_info = {
7eb552ae 207 .gen = 6, .num_pipes = 2,
c96c3a8c 208 .need_gfx_hws = 1, .has_hotplug = 1,
cbaef0f1 209 .has_fbc = 1,
73ae478c 210 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
3d29b842 211 .has_llc = 1,
a57c774a 212 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 213 CURSOR_OFFSETS,
f6e450a6
EA
214};
215
9a7e8492 216static const struct intel_device_info intel_sandybridge_m_info = {
7eb552ae 217 .gen = 6, .is_mobile = 1, .num_pipes = 2,
c96c3a8c 218 .need_gfx_hws = 1, .has_hotplug = 1,
9c04f015 219 .has_fbc = 1,
73ae478c 220 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
3d29b842 221 .has_llc = 1,
a57c774a 222 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 223 CURSOR_OFFSETS,
a13e4093
EA
224};
225
219f4fdb
BW
226#define GEN7_FEATURES \
227 .gen = 7, .num_pipes = 3, \
228 .need_gfx_hws = 1, .has_hotplug = 1, \
cbaef0f1 229 .has_fbc = 1, \
73ae478c 230 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
ab484f8f 231 .has_llc = 1
219f4fdb 232
c76b615c 233static const struct intel_device_info intel_ivybridge_d_info = {
219f4fdb
BW
234 GEN7_FEATURES,
235 .is_ivybridge = 1,
a57c774a 236 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 237 IVB_CURSOR_OFFSETS,
c76b615c
JB
238};
239
240static const struct intel_device_info intel_ivybridge_m_info = {
219f4fdb
BW
241 GEN7_FEATURES,
242 .is_ivybridge = 1,
243 .is_mobile = 1,
a57c774a 244 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 245 IVB_CURSOR_OFFSETS,
c76b615c
JB
246};
247
999bcdea
BW
248static const struct intel_device_info intel_ivybridge_q_info = {
249 GEN7_FEATURES,
250 .is_ivybridge = 1,
251 .num_pipes = 0, /* legal, last one wins */
a57c774a 252 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 253 IVB_CURSOR_OFFSETS,
999bcdea
BW
254};
255
70a3eb7a 256static const struct intel_device_info intel_valleyview_m_info = {
219f4fdb
BW
257 GEN7_FEATURES,
258 .is_mobile = 1,
259 .num_pipes = 2,
70a3eb7a 260 .is_valleyview = 1,
fba5d532 261 .display_mmio_offset = VLV_DISPLAY_BASE,
cbaef0f1 262 .has_fbc = 0, /* legal, last one wins */
30ccd964 263 .has_llc = 0, /* legal, last one wins */
a57c774a 264 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 265 CURSOR_OFFSETS,
70a3eb7a
JB
266};
267
268static const struct intel_device_info intel_valleyview_d_info = {
219f4fdb
BW
269 GEN7_FEATURES,
270 .num_pipes = 2,
70a3eb7a 271 .is_valleyview = 1,
fba5d532 272 .display_mmio_offset = VLV_DISPLAY_BASE,
cbaef0f1 273 .has_fbc = 0, /* legal, last one wins */
30ccd964 274 .has_llc = 0, /* legal, last one wins */
a57c774a 275 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 276 CURSOR_OFFSETS,
70a3eb7a
JB
277};
278
4cae9ae0 279static const struct intel_device_info intel_haswell_d_info = {
219f4fdb
BW
280 GEN7_FEATURES,
281 .is_haswell = 1,
dd93be58 282 .has_ddi = 1,
30568c45 283 .has_fpga_dbg = 1,
73ae478c 284 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
a57c774a 285 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 286 IVB_CURSOR_OFFSETS,
4cae9ae0
ED
287};
288
289static const struct intel_device_info intel_haswell_m_info = {
219f4fdb
BW
290 GEN7_FEATURES,
291 .is_haswell = 1,
292 .is_mobile = 1,
dd93be58 293 .has_ddi = 1,
30568c45 294 .has_fpga_dbg = 1,
73ae478c 295 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
a57c774a 296 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 297 IVB_CURSOR_OFFSETS,
c76b615c
JB
298};
299
4d4dead6 300static const struct intel_device_info intel_broadwell_d_info = {
4b30553d 301 .gen = 8, .num_pipes = 3,
4d4dead6
BW
302 .need_gfx_hws = 1, .has_hotplug = 1,
303 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
304 .has_llc = 1,
305 .has_ddi = 1,
66bc2cab 306 .has_fpga_dbg = 1,
8f94d24b 307 .has_fbc = 1,
a57c774a 308 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 309 IVB_CURSOR_OFFSETS,
4d4dead6
BW
310};
311
312static const struct intel_device_info intel_broadwell_m_info = {
4b30553d 313 .gen = 8, .is_mobile = 1, .num_pipes = 3,
4d4dead6
BW
314 .need_gfx_hws = 1, .has_hotplug = 1,
315 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
316 .has_llc = 1,
317 .has_ddi = 1,
66bc2cab 318 .has_fpga_dbg = 1,
8f94d24b 319 .has_fbc = 1,
a57c774a 320 GEN_DEFAULT_PIPEOFFSETS,
15d24aa5 321 IVB_CURSOR_OFFSETS,
4d4dead6
BW
322};
323
fd3c269f
ZY
324static const struct intel_device_info intel_broadwell_gt3d_info = {
325 .gen = 8, .num_pipes = 3,
326 .need_gfx_hws = 1, .has_hotplug = 1,
845f74a7 327 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
fd3c269f
ZY
328 .has_llc = 1,
329 .has_ddi = 1,
66bc2cab 330 .has_fpga_dbg = 1,
fd3c269f
ZY
331 .has_fbc = 1,
332 GEN_DEFAULT_PIPEOFFSETS,
15d24aa5 333 IVB_CURSOR_OFFSETS,
fd3c269f
ZY
334};
335
336static const struct intel_device_info intel_broadwell_gt3m_info = {
337 .gen = 8, .is_mobile = 1, .num_pipes = 3,
338 .need_gfx_hws = 1, .has_hotplug = 1,
845f74a7 339 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
fd3c269f
ZY
340 .has_llc = 1,
341 .has_ddi = 1,
66bc2cab 342 .has_fpga_dbg = 1,
fd3c269f
ZY
343 .has_fbc = 1,
344 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 345 IVB_CURSOR_OFFSETS,
fd3c269f
ZY
346};
347
7d87a7f7 348static const struct intel_device_info intel_cherryview_info = {
07fddb14 349 .gen = 8, .num_pipes = 3,
7d87a7f7
VS
350 .need_gfx_hws = 1, .has_hotplug = 1,
351 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
352 .is_valleyview = 1,
353 .display_mmio_offset = VLV_DISPLAY_BASE,
84fd4f4e 354 GEN_CHV_PIPEOFFSETS,
5efb3e28 355 CURSOR_OFFSETS,
7d87a7f7
VS
356};
357
72bbf0af 358static const struct intel_device_info intel_skylake_info = {
7201c0b3 359 .is_skylake = 1,
72bbf0af
DL
360 .gen = 9, .num_pipes = 3,
361 .need_gfx_hws = 1, .has_hotplug = 1,
362 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
363 .has_llc = 1,
364 .has_ddi = 1,
6c908bf4 365 .has_fpga_dbg = 1,
043efb11 366 .has_fbc = 1,
72bbf0af
DL
367 GEN_DEFAULT_PIPEOFFSETS,
368 IVB_CURSOR_OFFSETS,
369};
370
719388e1 371static const struct intel_device_info intel_skylake_gt3_info = {
719388e1
DL
372 .is_skylake = 1,
373 .gen = 9, .num_pipes = 3,
374 .need_gfx_hws = 1, .has_hotplug = 1,
375 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
376 .has_llc = 1,
377 .has_ddi = 1,
6c908bf4 378 .has_fpga_dbg = 1,
719388e1
DL
379 .has_fbc = 1,
380 GEN_DEFAULT_PIPEOFFSETS,
381 IVB_CURSOR_OFFSETS,
382};
383
1347f5b4
DL
384static const struct intel_device_info intel_broxton_info = {
385 .is_preliminary = 1,
7526ac19 386 .is_broxton = 1,
1347f5b4
DL
387 .gen = 9,
388 .need_gfx_hws = 1, .has_hotplug = 1,
389 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
390 .num_pipes = 3,
391 .has_ddi = 1,
6c908bf4 392 .has_fpga_dbg = 1,
ce89db2e 393 .has_fbc = 1,
1347f5b4
DL
394 GEN_DEFAULT_PIPEOFFSETS,
395 IVB_CURSOR_OFFSETS,
396};
397
ef11bdb3
RV
398static const struct intel_device_info intel_kabylake_info = {
399 .is_preliminary = 1,
400 .is_kabylake = 1,
401 .gen = 9,
402 .num_pipes = 3,
403 .need_gfx_hws = 1, .has_hotplug = 1,
404 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
405 .has_llc = 1,
406 .has_ddi = 1,
407 .has_fpga_dbg = 1,
408 .has_fbc = 1,
409 GEN_DEFAULT_PIPEOFFSETS,
410 IVB_CURSOR_OFFSETS,
411};
412
413static const struct intel_device_info intel_kabylake_gt3_info = {
414 .is_preliminary = 1,
415 .is_kabylake = 1,
416 .gen = 9,
417 .num_pipes = 3,
418 .need_gfx_hws = 1, .has_hotplug = 1,
419 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
420 .has_llc = 1,
421 .has_ddi = 1,
422 .has_fpga_dbg = 1,
423 .has_fbc = 1,
424 GEN_DEFAULT_PIPEOFFSETS,
425 IVB_CURSOR_OFFSETS,
426};
427
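/*
 * Annotation added by the editor (not part of the upstream file): the two
 * Kabylake entries above reuse the gen9 Skylake feature set (LLC, DDI,
 * FBC, FPGA debug); the GT3 variant additionally exposes the second BSD
 * ring. Both are still marked .is_preliminary = 1, so they are refused at
 * probe time unless the i915.preliminary_hw_support module parameter is
 * set (see i915_pci_probe() below).
 */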
a0a18075
JB
428/*
429 * Make sure any device matches here are from most specific to most
430 * general. For example, since the Quanta match is based on the subsystem
431 * and subvendor IDs, we need it to come before the more general IVB
432 * PCI ID matches, otherwise we'll use the wrong info struct above.
433 */
3cb27f38
JN
434static const struct pci_device_id pciidlist[] = {
435 INTEL_I830_IDS(&intel_i830_info),
436 INTEL_I845G_IDS(&intel_845g_info),
437 INTEL_I85X_IDS(&intel_i85x_info),
438 INTEL_I865G_IDS(&intel_i865g_info),
439 INTEL_I915G_IDS(&intel_i915g_info),
440 INTEL_I915GM_IDS(&intel_i915gm_info),
441 INTEL_I945G_IDS(&intel_i945g_info),
442 INTEL_I945GM_IDS(&intel_i945gm_info),
443 INTEL_I965G_IDS(&intel_i965g_info),
444 INTEL_G33_IDS(&intel_g33_info),
445 INTEL_I965GM_IDS(&intel_i965gm_info),
446 INTEL_GM45_IDS(&intel_gm45_info),
447 INTEL_G45_IDS(&intel_g45_info),
448 INTEL_PINEVIEW_IDS(&intel_pineview_info),
449 INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
450 INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
451 INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
452 INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
453 INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
454 INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
455 INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
456 INTEL_HSW_D_IDS(&intel_haswell_d_info),
457 INTEL_HSW_M_IDS(&intel_haswell_m_info),
458 INTEL_VLV_M_IDS(&intel_valleyview_m_info),
459 INTEL_VLV_D_IDS(&intel_valleyview_d_info),
460 INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
461 INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
462 INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
463 INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
464 INTEL_CHV_IDS(&intel_cherryview_info),
465 INTEL_SKL_GT1_IDS(&intel_skylake_info),
466 INTEL_SKL_GT2_IDS(&intel_skylake_info),
467 INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
468 INTEL_BXT_IDS(&intel_broxton_info),
d97044b6
D
469 INTEL_KBL_GT1_IDS(&intel_kabylake_info),
470 INTEL_KBL_GT2_IDS(&intel_kabylake_info),
471 INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
49ae35f2 472 {0, 0, 0}
1da177e4
LT
473};
474
79e53945 475MODULE_DEVICE_TABLE(pci, pciidlist);
79e53945 476
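/*
 * Annotation added by the editor (not part of the upstream file): the
 * INTEL_*_IDS() entries above are macros from include/drm/i915_pciids.h;
 * each expands to a list of INTEL_VGA_DEVICE(device_id, info)
 * initializers, so one line here covers every PCI device ID of a platform
 * and ties it to the matching intel_device_info structure.
 */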
30c964a6
RB
477static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
478{
479 enum intel_pch ret = PCH_NOP;
480
481 /*
482 * In a virtualized passthrough environment we can be in a
483 * setup where the ISA bridge is not able to be passed through.
484 * In this case, a south bridge can be emulated and we have to
485 * make an educated guess as to which PCH is really there.
486 */
487
488 if (IS_GEN5(dev)) {
489 ret = PCH_IBX;
490 DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
491 } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
492 ret = PCH_CPT;
493 DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
494 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
495 ret = PCH_LPT;
496 DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
ef11bdb3 497 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
30c964a6
RB
498 ret = PCH_SPT;
499 DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
500 }
501
502 return ret;
503}
504
0206e353 505void intel_detect_pch(struct drm_device *dev)
3bad0781
ZW
506{
507 struct drm_i915_private *dev_priv = dev->dev_private;
bcdb72ac 508 struct pci_dev *pch = NULL;
3bad0781 509
ce1bb329
BW
510 /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
511 * (which really amounts to a PCH but no South Display).
512 */
513 if (INTEL_INFO(dev)->num_pipes == 0) {
514 dev_priv->pch_type = PCH_NOP;
ce1bb329
BW
515 return;
516 }
517
3bad0781
ZW
518 /*
 519 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
 520 * make graphics device passthrough work easy for the VMM, which only
 521 * needs to expose the ISA bridge to let the driver know the real
 522 * hardware underneath. This is a requirement from the virtualization team.
6a9c4b35
RG
523 *
 524 * In some virtualized environments (e.g. XEN), there is an irrelevant
 525 * ISA bridge in the system. To work reliably, we should scan through
526 * all the ISA bridge devices and check for the first match, instead
527 * of only checking the first one.
3bad0781 528 */
bcdb72ac 529 while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
3bad0781 530 if (pch->vendor == PCI_VENDOR_ID_INTEL) {
bcdb72ac 531 unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
17a303ec 532 dev_priv->pch_id = id;
3bad0781 533
90711d50
JB
534 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
535 dev_priv->pch_type = PCH_IBX;
536 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
7fcb83cd 537 WARN_ON(!IS_GEN5(dev));
90711d50 538 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
3bad0781
ZW
539 dev_priv->pch_type = PCH_CPT;
540 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
7fcb83cd 541 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
c792513b
JB
542 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
543 /* PantherPoint is CPT compatible */
544 dev_priv->pch_type = PCH_CPT;
492ab669 545 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
7fcb83cd 546 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
eb877ebf
ED
547 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
548 dev_priv->pch_type = PCH_LPT;
549 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
a35cc9d0
RV
550 WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
551 WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
e76e0634
BW
552 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
553 dev_priv->pch_type = PCH_LPT;
554 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
a35cc9d0
RV
555 WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
556 WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
e7e7ea20
S
557 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
558 dev_priv->pch_type = PCH_SPT;
559 DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
ef11bdb3
RV
560 WARN_ON(!IS_SKYLAKE(dev) &&
561 !IS_KABYLAKE(dev));
e7e7ea20
S
562 } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
563 dev_priv->pch_type = PCH_SPT;
564 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
ef11bdb3
RV
565 WARN_ON(!IS_SKYLAKE(dev) &&
566 !IS_KABYLAKE(dev));
30c964a6
RB
567 } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) {
568 dev_priv->pch_type = intel_virt_detect_pch(dev);
bcdb72ac
ID
569 } else
570 continue;
571
6a9c4b35 572 break;
3bad0781 573 }
3bad0781 574 }
6a9c4b35 575 if (!pch)
bcdb72ac
ID
576 DRM_DEBUG_KMS("No PCH found.\n");
577
578 pci_dev_put(pch);
3bad0781
ZW
579}
580
2911a35b
BW
581bool i915_semaphore_is_enabled(struct drm_device *dev)
582{
583 if (INTEL_INFO(dev)->gen < 6)
a08acaf2 584 return false;
2911a35b 585
d330a953
JN
586 if (i915.semaphores >= 0)
587 return i915.semaphores;
2911a35b 588
71386ef9
OM
589 /* TODO: make semaphores and Execlists play nicely together */
590 if (i915.enable_execlists)
591 return false;
592
be71eabe
RV
593 /* Until we get further testing... */
594 if (IS_GEN8(dev))
595 return false;
596
59de3295 597#ifdef CONFIG_INTEL_IOMMU
2911a35b 598 /* Enable semaphores on SNB when IO remapping is off */
59de3295
DV
599 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
600 return false;
601#endif
2911a35b 602
a08acaf2 603 return true;
2911a35b
BW
604}
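/*
 * Annotation added by the editor (not part of the upstream file):
 * i915.semaphores is a module parameter that defaults to -1, meaning
 * "use the per-chip default". A non-negative value means the user forced
 * semaphores on (1) or off (0), which is why the check above returns it
 * directly before any of the per-platform heuristics are applied.
 */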
605
eb805623
DV
606void i915_firmware_load_error_print(const char *fw_path, int err)
607{
608 DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
609
610 /*
611 * If the reason is not known assume -ENOENT since that's the most
612 * usual failure mode.
613 */
614 if (!err)
615 err = -ENOENT;
616
617 if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
618 return;
619
620 DRM_ERROR(
621 "The driver is built-in, so to load the firmware you need to\n"
622 "include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
623 "in your initrd/initramfs image.\n");
624}
625
07f9cd0b
ID
626static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
627{
628 struct drm_device *dev = dev_priv->dev;
629 struct drm_encoder *encoder;
630
631 drm_modeset_lock_all(dev);
632 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
633 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
634
635 if (intel_encoder->suspend)
636 intel_encoder->suspend(intel_encoder);
637 }
638 drm_modeset_unlock_all(dev);
639}
640
ebc32824 641static int intel_suspend_complete(struct drm_i915_private *dev_priv);
1a5df187
PZ
642static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
643 bool rpm_resume);
f75a1985 644static int skl_resume_prepare(struct drm_i915_private *dev_priv);
a9a6b73a 645static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
f75a1985 646
ebc32824 647
5e365c39 648static int i915_drm_suspend(struct drm_device *dev)
ba8bbcf6 649{
61caf87c 650 struct drm_i915_private *dev_priv = dev->dev_private;
e5747e3a 651 pci_power_t opregion_target_state;
d5818938 652 int error;
61caf87c 653
b8efb17b
ZR
654 /* ignore lid events during suspend */
655 mutex_lock(&dev_priv->modeset_restore_lock);
656 dev_priv->modeset_restore = MODESET_SUSPENDED;
657 mutex_unlock(&dev_priv->modeset_restore_lock);
658
c67a470b
PZ
 659 /* We do a lot of poking in a lot of registers; make sure they work
 660 * properly. */
da7e29bd 661 intel_display_set_init_power(dev_priv, true);
cb10799c 662
5bcf719b
DA
663 drm_kms_helper_poll_disable(dev);
664
ba8bbcf6 665 pci_save_state(dev->pdev);
ba8bbcf6 666
d5818938
DV
667 error = i915_gem_suspend(dev);
668 if (error) {
669 dev_err(&dev->pdev->dev,
670 "GEM idle failed, resume might fail\n");
671 return error;
672 }
db1b76ca 673
a1c41994
AD
674 intel_guc_suspend(dev);
675
d5818938 676 intel_suspend_gt_powersave(dev);
a261b246 677
d5818938
DV
678 /*
679 * Disable CRTCs directly since we want to preserve sw state
680 * for _thaw. Also, power gate the CRTC power wells.
681 */
682 drm_modeset_lock_all(dev);
6b72d486 683 intel_display_suspend(dev);
d5818938 684 drm_modeset_unlock_all(dev);
2eb5252e 685
d5818938 686 intel_dp_mst_suspend(dev);
7d708ee4 687
d5818938
DV
688 intel_runtime_pm_disable_interrupts(dev_priv);
689 intel_hpd_cancel_work(dev_priv);
09b64267 690
d5818938 691 intel_suspend_encoders(dev_priv);
0e32b39c 692
d5818938 693 intel_suspend_hw(dev);
5669fcac 694
828c7908
BW
695 i915_gem_suspend_gtt_mappings(dev);
696
9e06dd39
JB
697 i915_save_state(dev);
698
95fa2eee
ID
699 opregion_target_state = PCI_D3cold;
700#if IS_ENABLED(CONFIG_ACPI_SLEEP)
701 if (acpi_target_system_state() < ACPI_STATE_S3)
e5747e3a 702 opregion_target_state = PCI_D1;
95fa2eee 703#endif
e5747e3a
JB
704 intel_opregion_notify_adapter(dev, opregion_target_state);
705
156c7ca0 706 intel_uncore_forcewake_reset(dev, false);
44834a67 707 intel_opregion_fini(dev);
8ee1c3db 708
82e3b8c1 709 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
3fa016a0 710
62d5d69b
MK
711 dev_priv->suspend_count++;
712
85e90679
KCA
713 intel_display_set_init_power(dev_priv, false);
714
61caf87c 715 return 0;
84b79f8d
RW
716}
717
ab3be73f 718static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
c3c09c95
ID
719{
720 struct drm_i915_private *dev_priv = drm_dev->dev_private;
721 int ret;
722
723 ret = intel_suspend_complete(dev_priv);
724
725 if (ret) {
726 DRM_ERROR("Suspend complete failed: %d\n", ret);
727
728 return ret;
729 }
730
731 pci_disable_device(drm_dev->pdev);
ab3be73f 732 /*
54875571 733 * During hibernation on some platforms the BIOS may try to access
ab3be73f
ID
734 * the device even though it's already in D3 and hang the machine. So
735 * leave the device in D0 on those platforms and hope the BIOS will
54875571
ID
736 * power down the device properly. The issue was seen on multiple old
737 * GENs with different BIOS vendors, so having an explicit blacklist
 738 * is impractical; apply the workaround on everything pre GEN6. The
739 * platforms where the issue was seen:
740 * Lenovo Thinkpad X301, X61s, X60, T60, X41
741 * Fujitsu FSC S7110
742 * Acer Aspire 1830T
ab3be73f 743 */
54875571 744 if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
ab3be73f 745 pci_set_power_state(drm_dev->pdev, PCI_D3hot);
c3c09c95
ID
746
747 return 0;
748}
749
1751fcf9 750int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
84b79f8d
RW
751{
752 int error;
753
754 if (!dev || !dev->dev_private) {
755 DRM_ERROR("dev: %p\n", dev);
756 DRM_ERROR("DRM not initialized, aborting suspend.\n");
757 return -ENODEV;
758 }
759
0b14cbd2
ID
760 if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
761 state.event != PM_EVENT_FREEZE))
762 return -EINVAL;
5bcf719b
DA
763
764 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
765 return 0;
6eecba33 766
5e365c39 767 error = i915_drm_suspend(dev);
84b79f8d
RW
768 if (error)
769 return error;
770
ab3be73f 771 return i915_drm_suspend_late(dev, false);
ba8bbcf6
JB
772}
773
5e365c39 774static int i915_drm_resume(struct drm_device *dev)
76c4b250
ID
775{
776 struct drm_i915_private *dev_priv = dev->dev_private;
9d49c0ef 777
d5818938
DV
778 mutex_lock(&dev->struct_mutex);
779 i915_gem_restore_gtt_mappings(dev);
780 mutex_unlock(&dev->struct_mutex);
9d49c0ef 781
61caf87c 782 i915_restore_state(dev);
44834a67 783 intel_opregion_setup(dev);
61caf87c 784
d5818938
DV
785 intel_init_pch_refclk(dev);
786 drm_mode_config_reset(dev);
1833b134 787
364aece0
PA
788 /*
789 * Interrupts have to be enabled before any batches are run. If not the
790 * GPU will hang. i915_gem_init_hw() will initiate batches to
791 * update/restore the context.
792 *
793 * Modeset enabling in intel_modeset_init_hw() also needs working
794 * interrupts.
795 */
796 intel_runtime_pm_enable_interrupts(dev_priv);
797
d5818938
DV
798 mutex_lock(&dev->struct_mutex);
799 if (i915_gem_init_hw(dev)) {
800 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
805de8f4 801 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
d5818938
DV
802 }
803 mutex_unlock(&dev->struct_mutex);
226485e9 804
a1c41994
AD
805 intel_guc_resume(dev);
806
d5818938 807 intel_modeset_init_hw(dev);
24576d23 808
d5818938
DV
809 spin_lock_irq(&dev_priv->irq_lock);
810 if (dev_priv->display.hpd_irq_setup)
811 dev_priv->display.hpd_irq_setup(dev);
812 spin_unlock_irq(&dev_priv->irq_lock);
0e32b39c 813
d5818938 814 drm_modeset_lock_all(dev);
043e9bda 815 intel_display_resume(dev);
d5818938 816 drm_modeset_unlock_all(dev);
15239099 817
d5818938 818 intel_dp_mst_resume(dev);
e7d6f7d7 819
d5818938
DV
820 /*
821 * ... but also need to make sure that hotplug processing
822 * doesn't cause havoc. Like in the driver load code we don't
 824 * bother with the tiny race here where we might lose hotplug
824 * notifications.
825 * */
826 intel_hpd_init(dev_priv);
827 /* Config may have changed between suspend and resume */
828 drm_helper_hpd_irq_event(dev);
1daed3fb 829
44834a67
CW
830 intel_opregion_init(dev);
831
82e3b8c1 832 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
073f34d9 833
b8efb17b
ZR
834 mutex_lock(&dev_priv->modeset_restore_lock);
835 dev_priv->modeset_restore = MODESET_DONE;
836 mutex_unlock(&dev_priv->modeset_restore_lock);
8a187455 837
e5747e3a
JB
838 intel_opregion_notify_adapter(dev, PCI_D0);
839
ee6f280e
ID
840 drm_kms_helper_poll_enable(dev);
841
074c6ada 842 return 0;
84b79f8d
RW
843}
844
5e365c39 845static int i915_drm_resume_early(struct drm_device *dev)
84b79f8d 846{
36d61e67 847 struct drm_i915_private *dev_priv = dev->dev_private;
1a5df187 848 int ret = 0;
36d61e67 849
76c4b250
ID
850 /*
851 * We have a resume ordering issue with the snd-hda driver also
 852 * requiring our device to be powered up. Due to the lack of a
853 * parent/child relationship we currently solve this with an early
854 * resume hook.
855 *
856 * FIXME: This should be solved with a special hdmi sink device or
857 * similar so that power domains can be employed.
858 */
84b79f8d
RW
859 if (pci_enable_device(dev->pdev))
860 return -EIO;
861
862 pci_set_master(dev->pdev);
863
efee833a 864 if (IS_VALLEYVIEW(dev_priv))
1a5df187 865 ret = vlv_resume_prepare(dev_priv, false);
36d61e67 866 if (ret)
ff0b187f
DL
867 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
868 ret);
36d61e67
ID
869
870 intel_uncore_early_sanitize(dev, true);
efee833a 871
a9a6b73a
DL
872 if (IS_BROXTON(dev))
873 ret = bxt_resume_prepare(dev_priv);
ef11bdb3 874 else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
f75a1985 875 ret = skl_resume_prepare(dev_priv);
a9a6b73a
DL
876 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
877 hsw_disable_pc8(dev_priv);
efee833a 878
36d61e67
ID
879 intel_uncore_sanitize(dev);
880 intel_power_domains_init_hw(dev_priv);
881
882 return ret;
76c4b250
ID
883}
884
1751fcf9 885int i915_resume_switcheroo(struct drm_device *dev)
76c4b250 886{
50a0072f 887 int ret;
76c4b250 888
097dd837
ID
889 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
890 return 0;
891
5e365c39 892 ret = i915_drm_resume_early(dev);
50a0072f
ID
893 if (ret)
894 return ret;
895
5a17514e
ID
896 return i915_drm_resume(dev);
897}
898
11ed50ec 899/**
f3953dcb 900 * i915_reset - reset chip after a hang
11ed50ec 901 * @dev: drm device to reset
11ed50ec
BG
902 *
903 * Reset the chip. Useful if a hang is detected. Returns zero on successful
904 * reset or otherwise an error code.
905 *
906 * Procedure is fairly simple:
907 * - reset the chip using the reset reg
908 * - re-init context state
909 * - re-init hardware status page
910 * - re-init ring buffer
911 * - re-init interrupt state
912 * - re-init display
913 */
d4b8bb2a 914int i915_reset(struct drm_device *dev)
11ed50ec 915{
50227e1c 916 struct drm_i915_private *dev_priv = dev->dev_private;
2e7c8ee7 917 bool simulated;
0573ed4a 918 int ret;
11ed50ec 919
dbea3cea
ID
920 intel_reset_gt_powersave(dev);
921
d54a02c0 922 mutex_lock(&dev->struct_mutex);
11ed50ec 923
069efc1d 924 i915_gem_reset(dev);
77f01230 925
2e7c8ee7
CW
926 simulated = dev_priv->gpu_error.stop_rings != 0;
927
be62acb4
MK
928 ret = intel_gpu_reset(dev);
929
930 /* Also reset the gpu hangman. */
931 if (simulated) {
932 DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
933 dev_priv->gpu_error.stop_rings = 0;
934 if (ret == -ENODEV) {
f2d91a2c
DV
935 DRM_INFO("Reset not implemented, but ignoring "
936 "error for simulated gpu hangs\n");
be62acb4
MK
937 ret = 0;
938 }
2e7c8ee7 939 }
be62acb4 940
d8f2716a
DV
941 if (i915_stop_ring_allow_warn(dev_priv))
942 pr_notice("drm/i915: Resetting chip after gpu hang\n");
943
0573ed4a 944 if (ret) {
f2d91a2c 945 DRM_ERROR("Failed to reset chip: %i\n", ret);
f953c935 946 mutex_unlock(&dev->struct_mutex);
f803aa55 947 return ret;
11ed50ec
BG
948 }
949
1362b776
VS
950 intel_overlay_reset(dev_priv);
951
11ed50ec
BG
952 /* Ok, now get things going again... */
953
954 /*
955 * Everything depends on having the GTT running, so we need to start
956 * there. Fortunately we don't need to do this unless we reset the
957 * chip at a PCI level.
958 *
959 * Next we need to restore the context, but we don't use those
960 * yet either...
961 *
962 * Ring buffer needs to be re-initialized in the KMS case, or if X
963 * was running at the time of the reset (i.e. we weren't VT
964 * switched away).
965 */
6689c167 966
33d30a9c
DV
967 /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
968 dev_priv->gpu_error.reload_in_reset = true;
6689c167 969
33d30a9c 970 ret = i915_gem_init_hw(dev);
6689c167 971
33d30a9c 972 dev_priv->gpu_error.reload_in_reset = false;
f817586c 973
33d30a9c
DV
974 mutex_unlock(&dev->struct_mutex);
975 if (ret) {
976 DRM_ERROR("Failed hw init on reset %d\n", ret);
977 return ret;
11ed50ec
BG
978 }
979
33d30a9c
DV
980 /*
981 * rps/rc6 re-init is necessary to restore state lost after the
982 * reset and the re-install of gt irqs. Skip for ironlake per
983 * previous concerns that it doesn't respond well to some forms
984 * of re-init after reset.
985 */
986 if (INTEL_INFO(dev)->gen > 5)
987 intel_enable_gt_powersave(dev);
988
11ed50ec
BG
989 return 0;
990}
991
56550d94 992static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
112b715e 993{
01a06850
DV
994 struct intel_device_info *intel_info =
995 (struct intel_device_info *) ent->driver_data;
996
d330a953 997 if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
b833d685
BW
998 DRM_INFO("This hardware requires preliminary hardware support.\n"
999 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
1000 return -ENODEV;
1001 }
1002
5fe49d86
CW
1003 /* Only bind to function 0 of the device. Early generations
1004 * used function 1 as a placeholder for multi-head. This causes
1005 * us confusion instead, especially on the systems where both
1006 * functions have the same PCI-ID!
1007 */
1008 if (PCI_FUNC(pdev->devfn))
1009 return -ENODEV;
1010
dcdb1674 1011 return drm_get_pci_dev(pdev, ent, &driver);
112b715e
KH
1012}
1013
1014static void
1015i915_pci_remove(struct pci_dev *pdev)
1016{
1017 struct drm_device *dev = pci_get_drvdata(pdev);
1018
1019 drm_put_dev(dev);
1020}
1021
84b79f8d 1022static int i915_pm_suspend(struct device *dev)
112b715e 1023{
84b79f8d
RW
1024 struct pci_dev *pdev = to_pci_dev(dev);
1025 struct drm_device *drm_dev = pci_get_drvdata(pdev);
112b715e 1026
84b79f8d
RW
1027 if (!drm_dev || !drm_dev->dev_private) {
1028 dev_err(dev, "DRM not initialized, aborting suspend.\n");
1029 return -ENODEV;
1030 }
112b715e 1031
5bcf719b
DA
1032 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1033 return 0;
1034
5e365c39 1035 return i915_drm_suspend(drm_dev);
76c4b250
ID
1036}
1037
1038static int i915_pm_suspend_late(struct device *dev)
1039{
888d0d42 1040 struct drm_device *drm_dev = dev_to_i915(dev)->dev;
76c4b250
ID
1041
1042 /*
c965d995 1043 * We have a suspend ordering issue with the snd-hda driver also
76c4b250
ID
 1044 * requiring our device to be powered up. Due to the lack of a
 1045 * parent/child relationship we currently solve this with a late
1046 * suspend hook.
1047 *
1048 * FIXME: This should be solved with a special hdmi sink device or
1049 * similar so that power domains can be employed.
1050 */
1051 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1052 return 0;
112b715e 1053
ab3be73f
ID
1054 return i915_drm_suspend_late(drm_dev, false);
1055}
1056
1057static int i915_pm_poweroff_late(struct device *dev)
1058{
1059 struct drm_device *drm_dev = dev_to_i915(dev)->dev;
1060
1061 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1062 return 0;
1063
1064 return i915_drm_suspend_late(drm_dev, true);
cbda12d7
ZW
1065}
1066
76c4b250
ID
1067static int i915_pm_resume_early(struct device *dev)
1068{
888d0d42 1069 struct drm_device *drm_dev = dev_to_i915(dev)->dev;
76c4b250 1070
097dd837
ID
1071 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1072 return 0;
1073
5e365c39 1074 return i915_drm_resume_early(drm_dev);
76c4b250
ID
1075}
1076
84b79f8d 1077static int i915_pm_resume(struct device *dev)
cbda12d7 1078{
888d0d42 1079 struct drm_device *drm_dev = dev_to_i915(dev)->dev;
84b79f8d 1080
097dd837
ID
1081 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1082 return 0;
1083
5a17514e 1084 return i915_drm_resume(drm_dev);
cbda12d7
ZW
1085}
1086
f75a1985
SS
1087static int skl_suspend_complete(struct drm_i915_private *dev_priv)
1088{
0a9d2bed 1089 enum csr_state state;
f75a1985
SS
1090 /* Enabling DC6 is not a hard requirement to enter runtime D3 */
1091
5d96d8af
DL
1092 skl_uninit_cdclk(dev_priv);
1093
0a9d2bed
AM
1094 /* TODO: wait for a completion event or
1095 * similar here instead of busy
 1096 * waiting using the wait_for() helper.
1097 */
1098 wait_for((state = intel_csr_load_status_get(dev_priv)) !=
1099 FW_UNINITIALIZED, 1000);
1100 if (state == FW_LOADED)
1101 skl_enable_dc6(dev_priv);
1102
f75a1985
SS
1103 return 0;
1104}
1105
ebc32824 1106static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
97bea207 1107{
414de7a0 1108 hsw_enable_pc8(dev_priv);
0ab9cfeb
ID
1109
1110 return 0;
97bea207
PZ
1111}
1112
31335cec
SS
1113static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
1114{
1115 struct drm_device *dev = dev_priv->dev;
1116
1117 /* TODO: when DC5 support is added disable DC5 here. */
1118
1119 broxton_ddi_phy_uninit(dev);
1120 broxton_uninit_cdclk(dev);
1121 bxt_enable_dc9(dev_priv);
1122
1123 return 0;
1124}
1125
1126static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
1127{
1128 struct drm_device *dev = dev_priv->dev;
1129
1130 /* TODO: when CSR FW support is added make sure the FW is loaded */
1131
1132 bxt_disable_dc9(dev_priv);
1133
1134 /*
1135 * TODO: when DC5 support is added enable DC5 here if the CSR FW
1136 * is available.
1137 */
1138 broxton_init_cdclk(dev);
1139 broxton_ddi_phy_init(dev);
1140 intel_prepare_ddi(dev);
1141
1142 return 0;
1143}
1144
f75a1985
SS
1145static int skl_resume_prepare(struct drm_i915_private *dev_priv)
1146{
1147 struct drm_device *dev = dev_priv->dev;
1148
0a9d2bed
AM
1149 if (intel_csr_load_status_get(dev_priv) == FW_LOADED)
1150 skl_disable_dc6(dev_priv);
1151
5d96d8af 1152 skl_init_cdclk(dev_priv);
f75a1985
SS
1153 intel_csr_load_program(dev);
1154
1155 return 0;
1156}
1157
ddeea5b0
ID
1158/*
1159 * Save all Gunit registers that may be lost after a D3 and a subsequent
1160 * S0i[R123] transition. The list of registers needing a save/restore is
1161 * defined in the VLV2_S0IXRegs document. This documents marks all Gunit
1162 * registers in the following way:
1163 * - Driver: saved/restored by the driver
1164 * - Punit : saved/restored by the Punit firmware
1165 * - No, w/o marking: no need to save/restore, since the register is R/O or
 1166 * used internally by the HW in a way that doesn't depend on
 1167 * keeping the content across a suspend/resume.
1168 * - Debug : used for debugging
1169 *
1170 * We save/restore all registers marked with 'Driver', with the following
1171 * exceptions:
1172 * - Registers out of use, including also registers marked with 'Debug'.
1173 * These have no effect on the driver's operation, so we don't save/restore
1174 * them to reduce the overhead.
 1175 * - Registers that are fully set up by an initialization function called from
1176 * the resume path. For example many clock gating and RPS/RC6 registers.
1177 * - Registers that provide the right functionality with their reset defaults.
1178 *
1179 * TODO: Except for registers that based on the above 3 criteria can be safely
1180 * ignored, we save/restore all others, practically treating the HW context as
1181 * a black-box for the driver. Further investigation is needed to reduce the
1182 * saved/restored registers even further, by following the same 3 criteria.
1183 */
1184static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1185{
1186 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1187 int i;
1188
1189 /* GAM 0x4000-0x4770 */
1190 s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
1191 s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
1192 s->arb_mode = I915_READ(ARB_MODE);
1193 s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
1194 s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
1195
1196 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
22dfe79f 1197 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
ddeea5b0
ID
1198
1199 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
b5f1c97f 1200 s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
ddeea5b0
ID
1201
1202 s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
1203 s->ecochk = I915_READ(GAM_ECOCHK);
1204 s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
1205 s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
1206
1207 s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
1208
1209 /* MBC 0x9024-0x91D0, 0x8500 */
1210 s->g3dctl = I915_READ(VLV_G3DCTL);
1211 s->gsckgctl = I915_READ(VLV_GSCKGCTL);
1212 s->mbctl = I915_READ(GEN6_MBCTL);
1213
1214 /* GCP 0x9400-0x9424, 0x8100-0x810C */
1215 s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
1216 s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
1217 s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
1218 s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
1219 s->rstctl = I915_READ(GEN6_RSTCTL);
1220 s->misccpctl = I915_READ(GEN7_MISCCPCTL);
1221
1222 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1223 s->gfxpause = I915_READ(GEN6_GFXPAUSE);
1224 s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
1225 s->rpdeuc = I915_READ(GEN6_RPDEUC);
1226 s->ecobus = I915_READ(ECOBUS);
1227 s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
1228 s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
1229 s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
1230 s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
1231 s->rcedata = I915_READ(VLV_RCEDATA);
1232 s->spare2gh = I915_READ(VLV_SPAREG2H);
1233
1234 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1235 s->gt_imr = I915_READ(GTIMR);
1236 s->gt_ier = I915_READ(GTIER);
1237 s->pm_imr = I915_READ(GEN6_PMIMR);
1238 s->pm_ier = I915_READ(GEN6_PMIER);
1239
1240 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
22dfe79f 1241 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
ddeea5b0
ID
1242
1243 /* GT SA CZ domain, 0x100000-0x138124 */
1244 s->tilectl = I915_READ(TILECTL);
1245 s->gt_fifoctl = I915_READ(GTFIFOCTL);
1246 s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
1247 s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1248 s->pmwgicz = I915_READ(VLV_PMWGICZ);
1249
1250 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1251 s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
1252 s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
9c25210f 1253 s->pcbr = I915_READ(VLV_PCBR);
ddeea5b0
ID
1254 s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
1255
1256 /*
1257 * Not saving any of:
1258 * DFT, 0x9800-0x9EC0
1259 * SARB, 0xB000-0xB1FC
1260 * GAC, 0x5208-0x524C, 0x14000-0x14C000
1261 * PCI CFG
1262 */
1263}
1264
1265static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1266{
1267 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1268 u32 val;
1269 int i;
1270
1271 /* GAM 0x4000-0x4770 */
1272 I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
1273 I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
1274 I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
1275 I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
1276 I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
1277
1278 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
22dfe79f 1279 I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
ddeea5b0
ID
1280
1281 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
b5f1c97f 1282 I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
ddeea5b0
ID
1283
1284 I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
1285 I915_WRITE(GAM_ECOCHK, s->ecochk);
1286 I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
1287 I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
1288
1289 I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
1290
1291 /* MBC 0x9024-0x91D0, 0x8500 */
1292 I915_WRITE(VLV_G3DCTL, s->g3dctl);
1293 I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
1294 I915_WRITE(GEN6_MBCTL, s->mbctl);
1295
1296 /* GCP 0x9400-0x9424, 0x8100-0x810C */
1297 I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
1298 I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
1299 I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
1300 I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
1301 I915_WRITE(GEN6_RSTCTL, s->rstctl);
1302 I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
1303
1304 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1305 I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
1306 I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
1307 I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
1308 I915_WRITE(ECOBUS, s->ecobus);
1309 I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
 1310 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
1311 I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
1312 I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
1313 I915_WRITE(VLV_RCEDATA, s->rcedata);
1314 I915_WRITE(VLV_SPAREG2H, s->spare2gh);
1315
1316 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1317 I915_WRITE(GTIMR, s->gt_imr);
1318 I915_WRITE(GTIER, s->gt_ier);
1319 I915_WRITE(GEN6_PMIMR, s->pm_imr);
1320 I915_WRITE(GEN6_PMIER, s->pm_ier);
1321
1322 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
22dfe79f 1323 I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
ddeea5b0
ID
1324
1325 /* GT SA CZ domain, 0x100000-0x138124 */
1326 I915_WRITE(TILECTL, s->tilectl);
1327 I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
1328 /*
 1329 * Preserve the GT allow wake and GFX force clock bit; they are not
 1330 * to be restored, as they are used to control the s0ix suspend/resume
1331 * sequence by the caller.
1332 */
1333 val = I915_READ(VLV_GTLC_WAKE_CTRL);
1334 val &= VLV_GTLC_ALLOWWAKEREQ;
1335 val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
1336 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1337
1338 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1339 val &= VLV_GFX_CLK_FORCE_ON_BIT;
1340 val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
1341 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1342
1343 I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
1344
1345 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1346 I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
1347 I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
9c25210f 1348 I915_WRITE(VLV_PCBR, s->pcbr);
ddeea5b0
ID
1349 I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
1350}
1351
650ad970
ID
1352int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
1353{
1354 u32 val;
1355 int err;
1356
650ad970 1357#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
650ad970
ID
1358
1359 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1360 val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
1361 if (force_on)
1362 val |= VLV_GFX_CLK_FORCE_ON_BIT;
1363 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1364
1365 if (!force_on)
1366 return 0;
1367
8d4eee9c 1368 err = wait_for(COND, 20);
650ad970
ID
1369 if (err)
1370 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
1371 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
1372
1373 return err;
1374#undef COND
1375}
1376
ddeea5b0
ID
1377static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
1378{
1379 u32 val;
1380 int err = 0;
1381
1382 val = I915_READ(VLV_GTLC_WAKE_CTRL);
1383 val &= ~VLV_GTLC_ALLOWWAKEREQ;
1384 if (allow)
1385 val |= VLV_GTLC_ALLOWWAKEREQ;
1386 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1387 POSTING_READ(VLV_GTLC_WAKE_CTRL);
1388
1389#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
1390 allow)
1391 err = wait_for(COND, 1);
1392 if (err)
1393 DRM_ERROR("timeout disabling GT waking\n");
1394 return err;
1395#undef COND
1396}
1397
1398static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
1399 bool wait_for_on)
1400{
1401 u32 mask;
1402 u32 val;
1403 int err;
1404
1405 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
1406 val = wait_for_on ? mask : 0;
1407#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
1408 if (COND)
1409 return 0;
1410
1411 DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
1412 wait_for_on ? "on" : "off",
1413 I915_READ(VLV_GTLC_PW_STATUS));
1414
1415 /*
1416 * RC6 transitioning can be delayed up to 2 msec (see
1417 * valleyview_enable_rps), use 3 msec for safety.
1418 */
1419 err = wait_for(COND, 3);
1420 if (err)
1421 DRM_ERROR("timeout waiting for GT wells to go %s\n",
1422 wait_for_on ? "on" : "off");
1423
1424 return err;
1425#undef COND
1426}
1427
1428static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
1429{
1430 if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
1431 return;
1432
1433 DRM_ERROR("GT register access while GT waking disabled\n");
1434 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
1435}
1436
ebc32824 1437static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
ddeea5b0
ID
1438{
1439 u32 mask;
1440 int err;
1441
1442 /*
 1443 * Bspec defines the following GT well-on flags as debug only, so
1444 * don't treat them as hard failures.
1445 */
1446 (void)vlv_wait_for_gt_wells(dev_priv, false);
1447
1448 mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
1449 WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
1450
1451 vlv_check_no_gt_access(dev_priv);
1452
1453 err = vlv_force_gfx_clock(dev_priv, true);
1454 if (err)
1455 goto err1;
1456
1457 err = vlv_allow_gt_wake(dev_priv, false);
1458 if (err)
1459 goto err2;
98711167
D
1460
1461 if (!IS_CHERRYVIEW(dev_priv->dev))
1462 vlv_save_gunit_s0ix_state(dev_priv);
ddeea5b0
ID
1463
1464 err = vlv_force_gfx_clock(dev_priv, false);
1465 if (err)
1466 goto err2;
1467
1468 return 0;
1469
1470err2:
1471 /* For safety always re-enable waking and disable gfx clock forcing */
1472 vlv_allow_gt_wake(dev_priv, true);
1473err1:
1474 vlv_force_gfx_clock(dev_priv, false);
1475
1476 return err;
1477}
1478
016970be
SK
1479static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1480 bool rpm_resume)
ddeea5b0
ID
1481{
1482 struct drm_device *dev = dev_priv->dev;
1483 int err;
1484 int ret;
1485
1486 /*
1487 * If any of the steps fail just try to continue, that's the best we
1488 * can do at this point. Return the first error code (which will also
1489 * leave RPM permanently disabled).
1490 */
1491 ret = vlv_force_gfx_clock(dev_priv, true);
1492
98711167
D
1493 if (!IS_CHERRYVIEW(dev_priv->dev))
1494 vlv_restore_gunit_s0ix_state(dev_priv);
ddeea5b0
ID
1495
1496 err = vlv_allow_gt_wake(dev_priv, true);
1497 if (!ret)
1498 ret = err;
1499
1500 err = vlv_force_gfx_clock(dev_priv, false);
1501 if (!ret)
1502 ret = err;
1503
1504 vlv_check_no_gt_access(dev_priv);
1505
016970be
SK
1506 if (rpm_resume) {
1507 intel_init_clock_gating(dev);
1508 i915_gem_restore_fences(dev);
1509 }
ddeea5b0
ID
1510
1511 return ret;
1512}
1513
97bea207 1514static int intel_runtime_suspend(struct device *device)
8a187455
PZ
1515{
1516 struct pci_dev *pdev = to_pci_dev(device);
1517 struct drm_device *dev = pci_get_drvdata(pdev);
1518 struct drm_i915_private *dev_priv = dev->dev_private;
0ab9cfeb 1519 int ret;
8a187455 1520
aeab0b5a 1521 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
c6df39b5
ID
1522 return -ENODEV;
1523
604effb7
ID
1524 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1525 return -ENODEV;
1526
8a187455
PZ
1527 DRM_DEBUG_KMS("Suspending device\n");
1528
d6102977
ID
1529 /*
1530 * We could deadlock here in case another thread holding struct_mutex
1531 * calls RPM suspend concurrently, since the RPM suspend will wait
1532 * first for this RPM suspend to finish. In this case the concurrent
1533 * RPM resume will be followed by its RPM suspend counterpart. Still
1534 * for consistency return -EAGAIN, which will reschedule this suspend.
1535 */
1536 if (!mutex_trylock(&dev->struct_mutex)) {
1537 DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
1538 /*
1539 * Bump the expiration timestamp, otherwise the suspend won't
1540 * be rescheduled.
1541 */
1542 pm_runtime_mark_last_busy(device);
1543
1544 return -EAGAIN;
1545 }
1546 /*
1547 * We are safe here against re-faults, since the fault handler takes
1548 * an RPM reference.
1549 */
1550 i915_gem_release_all_mmaps(dev_priv);
1551 mutex_unlock(&dev->struct_mutex);
1552
a1c41994
AD
1553 intel_guc_suspend(dev);
1554
fac6adb0 1555 intel_suspend_gt_powersave(dev);
2eb5252e 1556 intel_runtime_pm_disable_interrupts(dev_priv);
b5478bcd 1557
ebc32824 1558 ret = intel_suspend_complete(dev_priv);
0ab9cfeb
ID
1559 if (ret) {
1560 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
b963291c 1561 intel_runtime_pm_enable_interrupts(dev_priv);
0ab9cfeb
ID
1562
1563 return ret;
1564 }
a8a8bd54 1565
737b1506 1566 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
dc9fb09c 1567 intel_uncore_forcewake_reset(dev, false);
8a187455 1568 dev_priv->pm.suspended = true;
1fb2362b
KCA
1569
1570 /*
c8a0bd42
PZ
1571 * FIXME: We really should find a document that references the arguments
1572 * used below!
1fb2362b 1573 */
d37ae19a
PZ
1574 if (IS_BROADWELL(dev)) {
1575 /*
1576 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
1577 * being detected, and the call we do at intel_runtime_resume()
1578 * won't be able to restore them. Since PCI_D3hot matches the
1579 * actual specification and appears to be working, use it.
1580 */
1581 intel_opregion_notify_adapter(dev, PCI_D3hot);
1582 } else {
c8a0bd42
PZ
1583 /*
1584 * current versions of firmware which depend on this opregion
1585 * notification have repurposed the D1 definition to mean
1586 * "runtime suspended" vs. what you would normally expect (D3)
1587 * to distinguish it from notifications that might be sent via
1588 * the suspend path.
1589 */
1590 intel_opregion_notify_adapter(dev, PCI_D1);
c8a0bd42 1591 }
8a187455 1592
59bad947 1593 assert_forcewakes_inactive(dev_priv);
dc9fb09c 1594
a8a8bd54 1595 DRM_DEBUG_KMS("Device suspended\n");
8a187455
PZ
1596 return 0;
1597}
1598
97bea207 1599static int intel_runtime_resume(struct device *device)
8a187455
PZ
1600{
1601 struct pci_dev *pdev = to_pci_dev(device);
1602 struct drm_device *dev = pci_get_drvdata(pdev);
1603 struct drm_i915_private *dev_priv = dev->dev_private;
1a5df187 1604 int ret = 0;
8a187455 1605
604effb7
ID
1606 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1607 return -ENODEV;
8a187455
PZ
1608
1609 DRM_DEBUG_KMS("Resuming device\n");
1610
cd2e9e90 1611 intel_opregion_notify_adapter(dev, PCI_D0);
8a187455
PZ
1612 dev_priv->pm.suspended = false;
1613
a1c41994
AD
1614 intel_guc_resume(dev);
1615
1a5df187
PZ
1616 if (IS_GEN6(dev_priv))
1617 intel_init_pch_refclk(dev);
31335cec
SS
1618
1619 if (IS_BROXTON(dev))
1620 ret = bxt_resume_prepare(dev_priv);
ef11bdb3 1621 else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
f75a1985 1622 ret = skl_resume_prepare(dev_priv);
1a5df187
PZ
1623 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1624 hsw_disable_pc8(dev_priv);
1625 else if (IS_VALLEYVIEW(dev_priv))
1626 ret = vlv_resume_prepare(dev_priv, true);
1627
0ab9cfeb
ID
1628 /*
 1629	 * No point in rolling back things in case of an error, as the best
1630 * we can do is to hope that things will still work (and disable RPM).
1631 */
92b806d3
ID
1632 i915_gem_init_swizzling(dev);
1633 gen6_update_ring_freq(dev);
1634
b963291c 1635 intel_runtime_pm_enable_interrupts(dev_priv);
08d8a232
VS
1636
1637 /*
1638 * On VLV/CHV display interrupts are part of the display
1639 * power well, so hpd is reinitialized from there. For
1640 * everyone else do it here.
1641 */
1642 if (!IS_VALLEYVIEW(dev_priv))
1643 intel_hpd_init(dev_priv);
1644
fac6adb0 1645 intel_enable_gt_powersave(dev);
b5478bcd 1646
0ab9cfeb
ID
1647 if (ret)
1648 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
1649 else
1650 DRM_DEBUG_KMS("Device resumed\n");
1651
1652 return ret;
8a187455
PZ
1653}
1654
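For context, the .runtime_suspend/.runtime_resume callbacks above are only invoked by the PM core once runtime PM has been enabled for the device and its usage count drops to zero. A minimal, hedged sketch of that enabling side, assuming a generic probe-time helper: my_enable_runtime_pm() and my_dev are hypothetical names, and the driver's actual setup lives in its runtime-PM code elsewhere.

#include <linux/device.h>
#include <linux/pm_runtime.h>

/*
 * Illustrative sketch only -- not part of i915_drv.c. Names and ordering
 * are a generic illustration: once the device is marked active and runtime
 * PM is enabled, letting the usage count fall to zero allows the PM core to
 * call .runtime_suspend after the autosuspend delay, and a later
 * pm_runtime_get_sync() triggers .runtime_resume.
 */
static void my_enable_runtime_pm(struct device *my_dev)
{
	pm_runtime_get_noresume(my_dev);	/* hold a reference while setting up */
	pm_runtime_set_active(my_dev);		/* hardware is already powered up here */
	pm_runtime_enable(my_dev);

	pm_runtime_set_autosuspend_delay(my_dev, 10000);	/* 10 s of idleness */
	pm_runtime_use_autosuspend(my_dev);

	pm_runtime_mark_last_busy(my_dev);
	pm_runtime_put_autosuspend(my_dev);	/* drop our reference; idle timer may now fire */
}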
016970be
SK
1655/*
 1656 * This function implements functionality common to the runtime and system
 1657 * suspend sequences.
1658 */
ebc32824
SK
1659static int intel_suspend_complete(struct drm_i915_private *dev_priv)
1660{
ebc32824
SK
1661 int ret;
1662
16e44e3e 1663 if (IS_BROXTON(dev_priv))
31335cec 1664 ret = bxt_suspend_complete(dev_priv);
ef11bdb3 1665 else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
f75a1985 1666 ret = skl_suspend_complete(dev_priv);
16e44e3e 1667 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
ebc32824 1668 ret = hsw_suspend_complete(dev_priv);
16e44e3e 1669 else if (IS_VALLEYVIEW(dev_priv))
ebc32824 1670 ret = vlv_suspend_complete(dev_priv);
604effb7
ID
1671 else
1672 ret = 0;
ebc32824
SK
1673
1674 return ret;
1675}
1676
b4b78d12 1677static const struct dev_pm_ops i915_pm_ops = {
5545dbbf
ID
1678 /*
1679 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
1680 * PMSG_RESUME]
1681 */
0206e353 1682 .suspend = i915_pm_suspend,
76c4b250
ID
1683 .suspend_late = i915_pm_suspend_late,
1684 .resume_early = i915_pm_resume_early,
0206e353 1685 .resume = i915_pm_resume,
5545dbbf
ID
1686
1687 /*
1688 * S4 event handlers
1689 * @freeze, @freeze_late : called (1) before creating the
1690 * hibernation image [PMSG_FREEZE] and
1691 * (2) after rebooting, before restoring
1692 * the image [PMSG_QUIESCE]
1693 * @thaw, @thaw_early : called (1) after creating the hibernation
1694 * image, before writing it [PMSG_THAW]
1695 * and (2) after failing to create or
1696 * restore the image [PMSG_RECOVER]
1697 * @poweroff, @poweroff_late: called after writing the hibernation
1698 * image, before rebooting [PMSG_HIBERNATE]
1699 * @restore, @restore_early : called after rebooting and restoring the
1700 * hibernation image [PMSG_RESTORE]
1701 */
36d61e67
ID
1702 .freeze = i915_pm_suspend,
1703 .freeze_late = i915_pm_suspend_late,
1704 .thaw_early = i915_pm_resume_early,
1705 .thaw = i915_pm_resume,
1706 .poweroff = i915_pm_suspend,
ab3be73f 1707 .poweroff_late = i915_pm_poweroff_late,
76c4b250 1708 .restore_early = i915_pm_resume_early,
0206e353 1709 .restore = i915_pm_resume,
5545dbbf
ID
1710
1711 /* S0ix (via runtime suspend) event handlers */
97bea207
PZ
1712 .runtime_suspend = intel_runtime_suspend,
1713 .runtime_resume = intel_runtime_resume,
cbda12d7
ZW
1714};
1715
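As an aside (not something this file does), the kernel also provides macros that populate the same callback groups more compactly. Below is a hedged sketch of such a table; my_example_pm_ops is a hypothetical name and the callbacks are the ones declared earlier in this file. SET_SYSTEM_SLEEP_PM_OPS() only fills the base suspend/resume/freeze/thaw/poweroff/restore slots, not the _late/_early hooks i915 also wires up above, which is presumably one reason the table is written out field by field.

#include <linux/pm.h>
#include <linux/pm_runtime.h>

/*
 * Illustrative sketch only -- not how i915_pm_ops is actually defined.
 * The macros expand to designated initializers for the corresponding
 * dev_pm_ops fields.
 */
static const struct dev_pm_ops my_example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(i915_pm_suspend, i915_pm_resume)
	SET_RUNTIME_PM_OPS(intel_runtime_suspend, intel_runtime_resume, NULL)
};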
78b68556 1716static const struct vm_operations_struct i915_gem_vm_ops = {
de151cf6 1717 .fault = i915_gem_fault,
ab00b3e5
JB
1718 .open = drm_gem_vm_open,
1719 .close = drm_gem_vm_close,
de151cf6
JB
1720};
1721
e08e96de
AV
1722static const struct file_operations i915_driver_fops = {
1723 .owner = THIS_MODULE,
1724 .open = drm_open,
1725 .release = drm_release,
1726 .unlocked_ioctl = drm_ioctl,
1727 .mmap = drm_gem_mmap,
1728 .poll = drm_poll,
e08e96de
AV
1729 .read = drm_read,
1730#ifdef CONFIG_COMPAT
1731 .compat_ioctl = i915_compat_ioctl,
1732#endif
1733 .llseek = noop_llseek,
1734};
1735
1da177e4 1736static struct drm_driver driver = {
0c54781b
MW
1737 /* Don't use MTRRs here; the Xserver or userspace app should
1738 * deal with them for Intel hardware.
792d2b9a 1739 */
673a394b 1740 .driver_features =
10ba5012 1741 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
1751fcf9 1742 DRIVER_RENDER | DRIVER_MODESET,
22eae947 1743 .load = i915_driver_load,
ba8bbcf6 1744 .unload = i915_driver_unload,
673a394b 1745 .open = i915_driver_open,
22eae947
DA
1746 .lastclose = i915_driver_lastclose,
1747 .preclose = i915_driver_preclose,
673a394b 1748 .postclose = i915_driver_postclose,
915b4d11 1749 .set_busid = drm_pci_set_busid,
d8e29209 1750
955b12de 1751#if defined(CONFIG_DEBUG_FS)
27c202ad
BG
1752 .debugfs_init = i915_debugfs_init,
1753 .debugfs_cleanup = i915_debugfs_cleanup,
955b12de 1754#endif
673a394b 1755 .gem_free_object = i915_gem_free_object,
de151cf6 1756 .gem_vm_ops = &i915_gem_vm_ops,
1286ff73
DV
1757
1758 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1759 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1760 .gem_prime_export = i915_gem_prime_export,
1761 .gem_prime_import = i915_gem_prime_import,
1762
ff72145b 1763 .dumb_create = i915_gem_dumb_create,
da6b51d0 1764 .dumb_map_offset = i915_gem_mmap_gtt,
43387b37 1765 .dumb_destroy = drm_gem_dumb_destroy,
1da177e4 1766 .ioctls = i915_ioctls,
e08e96de 1767 .fops = &i915_driver_fops,
22eae947
DA
1768 .name = DRIVER_NAME,
1769 .desc = DRIVER_DESC,
1770 .date = DRIVER_DATE,
1771 .major = DRIVER_MAJOR,
1772 .minor = DRIVER_MINOR,
1773 .patchlevel = DRIVER_PATCHLEVEL,
1da177e4
LT
1774};
1775
8410ea3b
DA
1776static struct pci_driver i915_pci_driver = {
1777 .name = DRIVER_NAME,
1778 .id_table = pciidlist,
1779 .probe = i915_pci_probe,
1780 .remove = i915_pci_remove,
1781 .driver.pm = &i915_pm_ops,
1782};
1783
1da177e4
LT
1784static int __init i915_init(void)
1785{
1786 driver.num_ioctls = i915_max_ioctl;
79e53945
JB
1787
1788 /*
fd930478
CW
 1789	 * Enable KMS by default, unless explicitly overridden by
 1790	 * either the i915.modeset parameter or by the
1791 * vga_text_mode_force boot option.
79e53945 1792 */
fd930478
CW
1793
1794 if (i915.modeset == 0)
1795 driver.driver_features &= ~DRIVER_MODESET;
79e53945
JB
1796
1797#ifdef CONFIG_VGA_CONSOLE
d330a953 1798 if (vgacon_text_force() && i915.modeset == -1)
79e53945
JB
1799 driver.driver_features &= ~DRIVER_MODESET;
1800#endif
1801
b30324ad 1802 if (!(driver.driver_features & DRIVER_MODESET)) {
b30324ad 1803 /* Silently fail loading to not upset userspace. */
c9cd7b65 1804 DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
b30324ad 1805 return 0;
b30324ad 1806 }
3885c6bb 1807
c5b852f3 1808 if (i915.nuclear_pageflip)
b2e7723b
MR
1809 driver.driver_features |= DRIVER_ATOMIC;
1810
8410ea3b 1811 return drm_pci_init(&driver, &i915_pci_driver);
1da177e4
LT
1812}
1813
1814static void __exit i915_exit(void)
1815{
b33ecdd1
DV
1816 if (!(driver.driver_features & DRIVER_MODESET))
1817 return; /* Never loaded a driver. */
b33ecdd1 1818
8410ea3b 1819 drm_pci_exit(&driver, &i915_pci_driver);
1da177e4
LT
1820}
1821
1822module_init(i915_init);
1823module_exit(i915_exit);
1824
0a6d1631 1825MODULE_AUTHOR("Tungsten Graphics, Inc.");
1eab9234 1826MODULE_AUTHOR("Intel Corporation");
0a6d1631 1827
b5e89ed5 1828MODULE_DESCRIPTION(DRIVER_DESC);
1da177e4 1829MODULE_LICENSE("GPL and additional rights");