/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>

#include "display/intel_acpi.h"
#include "display/intel_audio.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
#include "display/intel_gmbus.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_sprite.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_reset.h"
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_pmu.h"
#include "i915_query.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_csr.h"
#include "intel_drv.h"
#include "intel_pm.h"

static struct drm_driver driver;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
static unsigned int i915_probe_fail_count;

int __i915_inject_load_error(struct drm_i915_private *i915, int err,
			     const char *func, int line)
{
	if (i915_probe_fail_count >= i915_modparams.inject_load_failure)
		return 0;

	if (++i915_probe_fail_count < i915_modparams.inject_load_failure)
		return 0;

	__i915_printk(i915, KERN_INFO,
		      "Injecting failure %d at checkpoint %u [%s:%d]\n",
		      err, i915_modparams.inject_load_failure, func, line);
	i915_modparams.inject_load_failure = 0;
	return err;
}

bool i915_error_injected(void)
{
	return i915_probe_fail_count && !i915_modparams.inject_load_failure;
}

#endif
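
/*
 * Illustrative usage note: the checkpoints above are driven by the
 * inject_load_failure module parameter (i915_modparams.inject_load_failure).
 * Booting with, say, i915.inject_load_failure=2 should make the second
 * i915_inject_probe_failure() checkpoint reached during probe return an
 * error, which exercises the corresponding error-unwind path.
 */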

#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
		    "providing the dmesg log by booting with drm.debug=0xf"

void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...)
{
	static bool shown_bug_once;
	struct device *kdev = dev_priv->drm.dev;
	bool is_error = level[1] <= KERN_ERR[1];
	bool is_debug = level[1] == KERN_DEBUG[1];
	struct va_format vaf;
	va_list args;

	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (is_error)
		dev_printk(level, kdev, "%pV", &vaf);
	else
		dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
			   __builtin_return_address(0), &vaf);

	va_end(args);

	if (is_error && !shown_bug_once) {
		/*
		 * Ask the user to file a bug report for the error, except
		 * if they may have caused the bug by fiddling with unsafe
		 * module parameters.
		 */
		if (!test_taint(TAINT_USER))
			dev_notice(kdev, "%s", FDO_BUG_MSG);
		shown_bug_once = true;
	}
}

/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
static enum intel_pch
intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
{
	switch (id) {
	case INTEL_PCH_IBX_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
		WARN_ON(!IS_GEN(dev_priv, 5));
		return PCH_IBX;
	case INTEL_PCH_CPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found CougarPoint PCH\n");
		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
		return PCH_CPT;
	case INTEL_PCH_PPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found PantherPoint PCH\n");
		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
		/* PantherPoint is CPT compatible */
		return PCH_CPT;
	case INTEL_PCH_LPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found LynxPoint PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
		return PCH_LPT;
	case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
		return PCH_LPT;
	case INTEL_PCH_WPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
		/* WildcatPoint is LPT compatible */
		return PCH_LPT;
	case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
		/* WildcatPoint is LPT compatible */
		return PCH_LPT;
	case INTEL_PCH_SPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
		return PCH_SPT;
	case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
		return PCH_SPT;
	case INTEL_PCH_KBP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
			!IS_COFFEELAKE(dev_priv));
		/* KBP is SPT compatible */
		return PCH_SPT;
	case INTEL_PCH_CNP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
		return PCH_CNP;
	case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
		return PCH_CNP;
	case INTEL_PCH_CMP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
		WARN_ON(!IS_COFFEELAKE(dev_priv));
		/* CometPoint is CNP compatible */
		return PCH_CNP;
	case INTEL_PCH_ICP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Ice Lake PCH\n");
		WARN_ON(!IS_ICELAKE(dev_priv));
		return PCH_ICP;
	case INTEL_PCH_MCC_DEVICE_ID_TYPE:
	case INTEL_PCH_MCC2_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n");
		WARN_ON(!IS_ELKHARTLAKE(dev_priv));
		return PCH_MCC;
	case INTEL_PCH_TGP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Tiger Lake LP PCH\n");
		WARN_ON(!IS_TIGERLAKE(dev_priv));
		return PCH_TGP;
	default:
		return PCH_NONE;
	}
}

static bool intel_is_virt_pch(unsigned short id,
			      unsigned short svendor, unsigned short sdevice)
{
	return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
		id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
		(id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
		 svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
		 sdevice == PCI_SUBDEVICE_ID_QEMU));
}

static unsigned short
intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
{
	unsigned short id = 0;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_TIGERLAKE(dev_priv))
		id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
	else if (IS_ELKHARTLAKE(dev_priv))
		id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
	else if (IS_ICELAKE(dev_priv))
		id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
	else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
		id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
	else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
		id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
	else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
	else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
	else if (IS_GEN(dev_priv, 5))
		id = INTEL_PCH_IBX_DEVICE_ID_TYPE;

	if (id)
		DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
	else
		DRM_DEBUG_KMS("Assuming no PCH\n");

	return id;
}

static void intel_detect_pch(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pch = NULL;

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough easy for the VMM, which then only
	 * needs to expose an ISA bridge to let the driver know the real
	 * hardware underneath. This is a requirement from the virtualization
	 * team.
	 *
	 * In some virtualized environments (e.g. XEN), there is an irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		unsigned short id;
		enum intel_pch pch_type;

		if (pch->vendor != PCI_VENDOR_ID_INTEL)
			continue;

		id = pch->device & INTEL_PCH_DEVICE_ID_MASK;

		pch_type = intel_pch_type(dev_priv, id);
		if (pch_type != PCH_NONE) {
			dev_priv->pch_type = pch_type;
			dev_priv->pch_id = id;
			break;
		} else if (intel_is_virt_pch(id, pch->subsystem_vendor,
					     pch->subsystem_device)) {
			id = intel_virt_detect_pch(dev_priv);
			pch_type = intel_pch_type(dev_priv, id);

			/* Sanity check virtual PCH id */
			if (WARN_ON(id && pch_type == PCH_NONE))
				id = 0;

			dev_priv->pch_type = pch_type;
			dev_priv->pch_id = id;
			break;
		}
	}

	/*
	 * Use PCH_NOP (PCH but no South Display) for PCH platforms without
	 * display.
	 */
	if (pch && !HAS_DISPLAY(dev_priv)) {
		DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
		dev_priv->pch_type = PCH_NOP;
		dev_priv->pch_id = 0;
	}

	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}

static int i915_getparam_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = pdev->revision;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->ggtt.num_fences;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_BSD:
		value = !!dev_priv->engine[VCS0];
		break;
	case I915_PARAM_HAS_BLT:
		value = !!dev_priv->engine[BCS0];
		break;
	case I915_PARAM_HAS_VEBOX:
		value = !!dev_priv->engine[VECS0];
		break;
	case I915_PARAM_HAS_BSD2:
		value = !!dev_priv->engine[VCS1];
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev_priv);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev_priv);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = INTEL_PPGTT(dev_priv);
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = !!(dev_priv->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version(dev_priv);
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = intel_sseu_subslice_total(sseu);
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = sseu->eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915_modparams.enable_hangcheck &&
			intel_has_gpu_reset(dev_priv);
		if (value && intel_has_reset_engine(dev_priv))
			value = 2;
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = 0;
		break;
	case I915_PARAM_HAS_POOLED_EU:
		value = HAS_POOLED_EU(dev_priv);
		break;
	case I915_PARAM_MIN_EU_IN_POOL:
		value = sseu->min_eu_in_pool;
		break;
	case I915_PARAM_HUC_STATUS:
		value = intel_huc_check_status(&dev_priv->gt.uc.huc);
		if (value < 0)
			return value;
		break;
	case I915_PARAM_MMAP_GTT_VERSION:
		/* Though we've started our numbering from 1, and so class all
		 * earlier versions as 0, in effect their value is undefined as
		 * the ioctl will report EINVAL for the unknown param!
		 */
		value = i915_gem_mmap_gtt_version();
		break;
	case I915_PARAM_HAS_SCHEDULER:
		value = dev_priv->caps.scheduler;
		break;

	case I915_PARAM_MMAP_VERSION:
		/* Remember to bump this if the version changes! */
	case I915_PARAM_HAS_GEM:
	case I915_PARAM_HAS_PAGEFLIPPING:
	case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
	case I915_PARAM_HAS_RELAXED_FENCING:
	case I915_PARAM_HAS_COHERENT_RINGS:
	case I915_PARAM_HAS_RELAXED_DELTA:
	case I915_PARAM_HAS_GEN7_SOL_RESET:
	case I915_PARAM_HAS_WAIT_TIMEOUT:
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
	case I915_PARAM_HAS_PINNED_BATCHES:
	case I915_PARAM_HAS_EXEC_NO_RELOC:
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
	case I915_PARAM_HAS_EXEC_SOFTPIN:
	case I915_PARAM_HAS_EXEC_ASYNC:
	case I915_PARAM_HAS_EXEC_FENCE:
	case I915_PARAM_HAS_EXEC_CAPTURE:
	case I915_PARAM_HAS_EXEC_BATCH_FIRST:
	case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
	case I915_PARAM_HAS_EXEC_SUBMIT_FENCE:
		/* For the time being all of these are always true;
		 * if some supported hardware does not have one of these
		 * features this value needs to be provided from
		 * INTEL_INFO(), a feature macro, or similar.
		 */
		value = 1;
		break;
	case I915_PARAM_HAS_CONTEXT_ISOLATION:
		value = intel_engines_has_context_isolation(dev_priv);
		break;
	case I915_PARAM_SLICE_MASK:
		value = sseu->slice_mask;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_SUBSLICE_MASK:
		value = sseu->subslice_mask[0];
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
		value = 1000 * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz;
		break;
	case I915_PARAM_MMAP_GTT_COHERENT:
		value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (put_user(value, param->value))
		return -EFAULT;

	return 0;
}
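
/*
 * Illustrative userspace sketch (hypothetical caller, not part of this
 * file): these parameters are queried via DRM_IOCTL_I915_GETPARAM, e.g.
 *
 *	int value = 0;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_CHIPSET_ID,
 *				   .value = &value };
 *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("PCI device id: 0x%04x\n", value);
 *
 * where fd is an open i915 DRM file descriptor.
 */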

static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	int domain = pci_domain_nr(dev_priv->drm.pdev->bus);

	dev_priv->bridge_dev =
		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}
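
/*
 * Note: PCI_DEVFN(0, 0) in the GPU's domain is conventionally the host
 * bridge; its config space is what the MCHBAR helpers below read and
 * write, which is why the driver keeps a reference to it here.
 */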

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_GEN(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}
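
/*
 * Note: on gen4+ MCHBAR is a 64-bit register split across two config
 * dwords (reg and reg + 4), hence the extra read/write of reg + 4 above;
 * older parts only implement the low 32 bits, so temp_hi stays 0.
 */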

/*
 * Setup MCHBAR if possible; record in dev_priv->mchbar_need_disable
 * whether we enabled it here and should disable it again on teardown.
 */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_i915_private *dev_priv = cookie;

	intel_modeset_vga_set_state(dev_priv, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int i915_resume_switcheroo(struct drm_i915_private *i915);
static int i915_suspend_switcheroo(struct drm_i915_private *i915,
				   pm_message_t state);

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_i915_private *i915 = pdev_to_i915(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (!i915) {
		dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
		return;
	}

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(pdev, PCI_D0);
		i915_resume_switcheroo(i915);
		i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(i915, pmm);
		i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_i915_private *i915 = pdev_to_i915(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return i915 && i915->drm.open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

static int i915_driver_modeset_probe(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	if (HAS_DISPLAY(dev_priv)) {
		ret = drm_vblank_init(&dev_priv->drm,
				      INTEL_INFO(dev_priv)->num_pipes);
		if (ret)
			goto out;
	}

	intel_bios_init(dev_priv);

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(dev_priv);

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_gmbus_setup(dev_priv);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	ret = intel_modeset_init(dev);
	if (ret)
		goto cleanup_irq;

	ret = i915_gem_init(dev_priv);
	if (ret)
		goto cleanup_modeset;

	intel_overlay_setup(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	intel_init_ipc(dev_priv);

	return 0;

cleanup_gem:
	i915_gem_suspend(dev_priv);
	i915_gem_driver_remove(dev_priv);
	i915_gem_driver_release(dev_priv);
cleanup_modeset:
	intel_modeset_driver_remove(dev);
cleanup_irq:
	intel_irq_uninstall(dev_priv);
	intel_gmbus_teardown(dev_priv);
cleanup_csr:
	intel_csr_ucode_fini(dev_priv);
	intel_power_domains_driver_remove(dev_priv);
	vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
	vga_client_register(pdev, NULL, NULL, NULL);
out:
	return ret;
}

static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = ggtt->gmadr.start;
	ap->ranges[0].size = ggtt->mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}
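
/*
 * Note: alloc_ordered_workqueue() creates a workqueue that executes at
 * most one work item at a time, in queueing order, which is what the
 * "no point in running more than one instance" reasoning above relies on.
 */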

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
	pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);

	if (pre) {
		DRM_ERROR("This is a pre-production stepping. "
			  "It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

/**
 * i915_driver_early_probe - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_subplatform_init(dev_priv);

	intel_uncore_init_early(&dev_priv->uncore, dev_priv);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);

	mutex_init(&dev_priv->sb_lock);
	pm_qos_add_request(&dev_priv->sb_qos,
			   PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);
	mutex_init(&dev_priv->hdcp_comp_mutex);

	i915_memcpy_init_early(dev_priv);
	intel_runtime_pm_init_early(&dev_priv->runtime_pm);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	intel_wopcm_init_early(&dev_priv->wopcm);

	intel_gt_init_early(&dev_priv->gt, dev_priv);

	ret = i915_gem_init_early(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_pm_setup(dev_priv);
	intel_init_dpio(dev_priv);
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
		goto err_gem;
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	intel_display_crc_init(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_gem:
	i915_gem_cleanup_early(dev_priv);
err_workqueues:
	intel_gt_driver_late_release(&dev_priv->gt);
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_late_release - cleanup the setup done in
 *			      i915_driver_early_probe()
 * @dev_priv: device private
 */
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release(&dev_priv->gt);
	i915_workqueues_cleanup(dev_priv);

	pm_qos_remove_request(&dev_priv->sb_qos);
	mutex_destroy(&dev_priv->sb_lock);
}

/**
 * i915_driver_mmio_probe - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = intel_uncore_init_mmio(&dev_priv->uncore);
	if (ret < 0)
		goto err_bridge;

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);

	intel_device_info_init_mmio(dev_priv);

	intel_uncore_prune_mmio_domains(&dev_priv->uncore);

	intel_uc_init_mmio(&dev_priv->gt.uc);

	ret = intel_engines_init_mmio(dev_priv);
	if (ret)
		goto err_uncore;

	i915_gem_init_mmio(dev_priv);

	return 0;

err_uncore:
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
err_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
 * @dev_priv: device private
 */
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
	intel_engines_cleanup(dev_priv);
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	intel_gvt_sanitize_options(dev_priv);
}

#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type

static const char *intel_dram_type_str(enum intel_dram_type type)
{
	static const char * const str[] = {
		DRAM_TYPE_STR(UNKNOWN),
		DRAM_TYPE_STR(DDR3),
		DRAM_TYPE_STR(DDR4),
		DRAM_TYPE_STR(LPDDR3),
		DRAM_TYPE_STR(LPDDR4),
	};

	if (type >= ARRAY_SIZE(str))
		type = INTEL_DRAM_UNKNOWN;

	return str[type];
}

#undef DRAM_TYPE_STR

static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
	return dimm->ranks * 64 / (dimm->width ?: 1);
}
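
/*
 * Worked example: each rank drives a 64-bit data bus, so a dual-rank DIMM
 * built from x8 chips has 2 * 64 / 8 = 16 DRAM devices, while a
 * single-rank x16 DIMM has 64 / 16 = 4 devices.
 */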

/* Returns total GB for the whole DIMM */
static int skl_get_dimm_size(u16 val)
{
	return val & SKL_DRAM_SIZE_MASK;
}

static int skl_get_dimm_width(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
		return 0;

	switch (val & SKL_DRAM_WIDTH_MASK) {
	case SKL_DRAM_WIDTH_X8:
	case SKL_DRAM_WIDTH_X16:
	case SKL_DRAM_WIDTH_X32:
		val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
		return 8 << val;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static int skl_get_dimm_ranks(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
		return 0;

	val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;

	return val + 1;
}

/* Returns total GB for the whole DIMM */
static int cnl_get_dimm_size(u16 val)
{
	return (val & CNL_DRAM_SIZE_MASK) / 2;
}

static int cnl_get_dimm_width(u16 val)
{
	if (cnl_get_dimm_size(val) == 0)
		return 0;

	switch (val & CNL_DRAM_WIDTH_MASK) {
	case CNL_DRAM_WIDTH_X8:
	case CNL_DRAM_WIDTH_X16:
	case CNL_DRAM_WIDTH_X32:
		val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
		return 8 << val;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static int cnl_get_dimm_ranks(u16 val)
{
	if (cnl_get_dimm_size(val) == 0)
		return 0;

	val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;

	return val + 1;
}

static bool
skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
{
	/* Convert total GB to Gb per DRAM device */
	return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
}
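
/*
 * Worked example: a 16 GB dual-rank x8 DIMM has 16 devices, giving
 * 8 * 16 / 16 = 8 Gb per device (not 16Gb); an 8 GB single-rank x16 DIMM
 * has 4 devices, giving 8 * 8 / 4 = 16 Gb per device, so the test is true.
 */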

static void
skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
		       struct dram_dimm_info *dimm,
		       int channel, char dimm_name, u16 val)
{
	if (INTEL_GEN(dev_priv) >= 10) {
		dimm->size = cnl_get_dimm_size(val);
		dimm->width = cnl_get_dimm_width(val);
		dimm->ranks = cnl_get_dimm_ranks(val);
	} else {
		dimm->size = skl_get_dimm_size(val);
		dimm->width = skl_get_dimm_width(val);
		dimm->ranks = skl_get_dimm_ranks(val);
	}

	DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
		      channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
		      yesno(skl_is_16gb_dimm(dimm)));
}

static int
skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
			  struct dram_channel_info *ch,
			  int channel, u32 val)
{
	skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
			       channel, 'L', val & 0xffff);
	skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
			       channel, 'S', val >> 16);

	if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
		DRM_DEBUG_KMS("CH%u not populated\n", channel);
		return -EINVAL;
	}

	if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
		ch->ranks = 2;
	else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
		ch->ranks = 2;
	else
		ch->ranks = 1;

	ch->is_16gb_dimm =
		skl_is_16gb_dimm(&ch->dimm_l) ||
		skl_is_16gb_dimm(&ch->dimm_s);

	DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
		      channel, ch->ranks, yesno(ch->is_16gb_dimm));

	return 0;
}

static bool
intel_is_dram_symmetric(const struct dram_channel_info *ch0,
			const struct dram_channel_info *ch1)
{
	return !memcmp(ch0, ch1, sizeof(*ch0)) &&
		(ch0->dimm_s.size == 0 ||
		 !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
}
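
/*
 * Note: "symmetric" here means the two channels are populated identically
 * and, within a channel, either only the L slot is populated or the L and
 * S DIMMs match; the result is recorded in dram_info for later consumers.
 */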

static int
skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	struct dram_channel_info ch0 = {}, ch1 = {};
	u32 val;
	int ret;

	val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
	if (ret == 0)
		dram_info->num_channels++;

	val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
	if (ret == 0)
		dram_info->num_channels++;

	if (dram_info->num_channels == 0) {
		DRM_INFO("Number of memory channels is zero\n");
		return -EINVAL;
	}

	/*
	 * If either channel is a single-rank channel, the worst case output
	 * will be the same as for single-rank memory, so consider it
	 * single-rank memory.
	 */
	if (ch0.ranks == 1 || ch1.ranks == 1)
		dram_info->ranks = 1;
	else
		dram_info->ranks = max(ch0.ranks, ch1.ranks);

	if (dram_info->ranks == 0) {
		DRM_INFO("couldn't get memory rank information\n");
		return -EINVAL;
	}

	dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;

	dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);

	DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n",
		      yesno(dram_info->symmetric_memory));
	return 0;
}

static enum intel_dram_type
skl_get_dram_type(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);

	switch (val & SKL_DRAM_DDR_TYPE_MASK) {
	case SKL_DRAM_DDR_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case SKL_DRAM_DDR_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case SKL_DRAM_DDR_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case SKL_DRAM_DDR_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}

static int
skl_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 mem_freq_khz, val;
	int ret;

	dram_info->type = skl_get_dram_type(dev_priv);
	DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));

	ret = skl_dram_get_channels_info(dev_priv);
	if (ret)
		return ret;

	val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
	mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
				    SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	dram_info->bandwidth_kbps = dram_info->num_channels *
				    mem_freq_khz * 8;

	if (dram_info->bandwidth_kbps == 0) {
		DRM_INFO("Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	dram_info->valid = true;
	return 0;
}

/* Returns Gb per DRAM device */
static int bxt_get_dimm_size(u32 val)
{
	switch (val & BXT_DRAM_SIZE_MASK) {
	case BXT_DRAM_SIZE_4GBIT:
		return 4;
	case BXT_DRAM_SIZE_6GBIT:
		return 6;
	case BXT_DRAM_SIZE_8GBIT:
		return 8;
	case BXT_DRAM_SIZE_12GBIT:
		return 12;
	case BXT_DRAM_SIZE_16GBIT:
		return 16;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static int bxt_get_dimm_width(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return 0;

	val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;

	return 8 << val;
}

static int bxt_get_dimm_ranks(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return 0;

	switch (val & BXT_DRAM_RANK_MASK) {
	case BXT_DRAM_RANK_SINGLE:
		return 1;
	case BXT_DRAM_RANK_DUAL:
		return 2;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static enum intel_dram_type bxt_get_dimm_type(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return INTEL_DRAM_UNKNOWN;

	switch (val & BXT_DRAM_TYPE_MASK) {
	case BXT_DRAM_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case BXT_DRAM_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case BXT_DRAM_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case BXT_DRAM_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}

static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
			      u32 val)
{
	dimm->width = bxt_get_dimm_width(val);
	dimm->ranks = bxt_get_dimm_ranks(val);

	/*
	 * Size in register is Gb per DRAM device. Convert to total
	 * GB to match the way we report this for non-LP platforms.
	 */
	dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
}
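
/*
 * Worked example: a dual-rank DIMM of x16, 8Gb devices has 2 * 64 / 16 = 8
 * devices, so the total reported here is 8 * 8 / 8 = 8 GB, matching the
 * units used by the SKL/CNL paths above.
 */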

static int
bxt_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 dram_channels;
	u32 mem_freq_khz, val;
	u8 num_active_channels;
	int i;

	val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
	mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
				    BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
	num_active_channels = hweight32(dram_channels);

	/* Each active bit represents 4-byte channel */
	dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);

	if (dram_info->bandwidth_kbps == 0) {
		DRM_INFO("Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	/*
	 * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
	 */
	for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
		struct dram_dimm_info dimm;
		enum intel_dram_type type;

		val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
		if (val == 0xFFFFFFFF)
			continue;

		dram_info->num_channels++;

		bxt_get_dimm_info(&dimm, val);
		type = bxt_get_dimm_type(val);

		WARN_ON(type != INTEL_DRAM_UNKNOWN &&
			dram_info->type != INTEL_DRAM_UNKNOWN &&
			dram_info->type != type);

		DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
			      i - BXT_D_CR_DRP0_DUNIT_START,
			      dimm.size, dimm.width, dimm.ranks,
			      intel_dram_type_str(type));

		/*
		 * If any channel is a single-rank channel, the worst case
		 * output will be the same as for single-rank memory, so
		 * consider it single-rank memory.
		 */
		if (dram_info->ranks == 0)
			dram_info->ranks = dimm.ranks;
		else if (dimm.ranks == 1)
			dram_info->ranks = 1;

		if (type != INTEL_DRAM_UNKNOWN)
			dram_info->type = type;
	}

	if (dram_info->type == INTEL_DRAM_UNKNOWN ||
	    dram_info->ranks == 0) {
		DRM_INFO("couldn't get memory information\n");
		return -EINVAL;
	}

	dram_info->valid = true;
	return 0;
}

static void
intel_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	int ret;

	/*
	 * Assume 16Gb DIMMs are present until proven otherwise.
	 * This is only used for the level 0 watermark latency
	 * w/a which does not apply to bxt/glk.
	 */
	dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);

	if (INTEL_GEN(dev_priv) < 9)
		return;

	if (IS_GEN9_LP(dev_priv))
		ret = bxt_get_dram_info(dev_priv);
	else
		ret = skl_get_dram_info(dev_priv);
	if (ret)
		return;

	DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n",
		      dram_info->bandwidth_kbps,
		      dram_info->num_channels);

	DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n",
		      dram_info->ranks, yesno(dram_info->is_16gb_dimm));
}

static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };

	return EDRAM_NUM_BANKS(cap) *
	       ways[EDRAM_WAYS_IDX(cap)] *
	       sets[EDRAM_SETS_IDX(cap)];
}
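
/*
 * Illustrative decode (example values, not taken from any spec): a
 * capability word reporting 8 banks, ways index 1 (8 ways) and sets
 * index 0 (1 set) would yield 8 * 8 * 1 = 64 MB; banks * ways * sets
 * is interpreted directly in MB here.
 */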
1499
1500static void edram_detect(struct drm_i915_private *dev_priv)
1501{
1502 u32 edram_cap = 0;
1503
1504 if (!(IS_HASWELL(dev_priv) ||
1505 IS_BROADWELL(dev_priv) ||
1506 INTEL_GEN(dev_priv) >= 9))
1507 return;
1508
1509 edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);
1510
1511 /* NB: We can't write IDICR yet because we don't have gt funcs set up */
1512
1513 if (!(edram_cap & EDRAM_ENABLED))
1514 return;
1515
1516 /*
1517 * The needed capability bits for size calculation are not there with
1518 * pre gen9 so return 128MB always.
1519 */
1520 if (INTEL_GEN(dev_priv) < 9)
1521 dev_priv->edram_size_mb = 128;
1522 else
1523 dev_priv->edram_size_mb =
1524 gen9_edram_size_mb(dev_priv, edram_cap);
1525
1526 DRM_INFO("Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
1527}
1528
0673ad47 1529/**
0b61b8b0 1530 * i915_driver_hw_probe - setup state requiring device access
0673ad47
CW
1531 * @dev_priv: device private
1532 *
1533 * Setup state that requires accessing the device, but doesn't require
1534 * exposing the driver via kernel internal or userspace interfaces.
1535 */
0b61b8b0 1536static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
0673ad47 1537{
52a05c30 1538 struct pci_dev *pdev = dev_priv->drm.pdev;
0673ad47
CW
1539 int ret;
1540
50d84418 1541 if (i915_inject_probe_failure(dev_priv))
0673ad47
CW
1542 return -ENODEV;
1543
1400cc7e 1544 intel_device_info_runtime_init(dev_priv);
94b4f3ba 1545
4bdafb9d
CW
1546 if (HAS_PPGTT(dev_priv)) {
1547 if (intel_vgpu_active(dev_priv) &&
ca6ac684 1548 !intel_vgpu_has_full_ppgtt(dev_priv)) {
4bdafb9d
CW
1549 i915_report_error(dev_priv,
1550 "incompatible vGPU found, support for isolated ppGTT required\n");
1551 return -ENXIO;
1552 }
1553 }
1554
46592892
CW
1555 if (HAS_EXECLISTS(dev_priv)) {
1556 /*
1557 * Older GVT emulation depends upon intercepting CSB mmio,
1558 * which we no longer use, preferring to use the HWSP cache
1559 * instead.
1560 */
1561 if (intel_vgpu_active(dev_priv) &&
1562 !intel_vgpu_has_hwsp_emulation(dev_priv)) {
1563 i915_report_error(dev_priv,
1564 "old vGPU host found, support for HWSP emulation required\n");
1565 return -ENXIO;
1566 }
1567 }
1568
94b4f3ba 1569 intel_sanitize_options(dev_priv);
0673ad47 1570
f6ac993f
DCS
1571 /* needs to be done before ggtt probe */
1572 edram_detect(dev_priv);
1573
9f9b2792
LL
1574 i915_perf_init(dev_priv);
1575
97d6d7ab 1576 ret = i915_ggtt_probe_hw(dev_priv);
0673ad47 1577 if (ret)
9f172f6f 1578 goto err_perf;
0673ad47 1579
9f172f6f
CW
1580 /*
1581 * WARNING: Apparently we must kick fbdev drivers before vgacon,
1582 * otherwise the vga fbdev driver falls over.
1583 */
0673ad47
CW
1584 ret = i915_kick_out_firmware_fb(dev_priv);
1585 if (ret) {
1586 DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
9f172f6f 1587 goto err_ggtt;
0673ad47
CW
1588 }
1589
c6b38fbb 1590 ret = vga_remove_vgacon(pdev);
0673ad47
CW
1591 if (ret) {
1592 DRM_ERROR("failed to remove conflicting VGA console\n");
9f172f6f 1593 goto err_ggtt;
0673ad47
CW
1594 }
1595
97d6d7ab 1596 ret = i915_ggtt_init_hw(dev_priv);
0088e522 1597 if (ret)
9f172f6f 1598 goto err_ggtt;
0088e522 1599
d8a44248
TU
1600 intel_gt_init_hw(dev_priv);
1601
97d6d7ab 1602 ret = i915_ggtt_enable_hw(dev_priv);
0088e522
CW
1603 if (ret) {
1604 DRM_ERROR("failed to enable GGTT\n");
9f172f6f 1605 goto err_ggtt;
0088e522
CW
1606 }
1607
52a05c30 1608 pci_set_master(pdev);
0673ad47
CW
1609
1610 /* overlay on gen2 is broken and can't address above 1G */
cf819eff 1611 if (IS_GEN(dev_priv, 2)) {
52a05c30 1612 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
0673ad47
CW
1613 if (ret) {
1614 DRM_ERROR("failed to set DMA mask\n");
1615
9f172f6f 1616 goto err_ggtt;
0673ad47
CW
1617 }
1618 }
1619
0673ad47
CW
1620 /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1621 * using 32bit addressing, overwriting memory if HWS is located
1622 * above 4GB.
1623 *
1624 * The documentation also mentions an issue with undefined
1625 * behaviour if any general state is accessed within a page above 4GB,
1626 * which also needs to be handled carefully.
1627 */
c0f86832 1628 if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
52a05c30 1629 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
0673ad47
CW
1630
1631 if (ret) {
1632 DRM_ERROR("failed to set DMA mask\n");
1633
9f172f6f 1634 goto err_ggtt;
0673ad47
CW
1635 }
1636 }
1637
0673ad47
CW
1638 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
1639 PM_QOS_DEFAULT_VALUE);
1640
19e0a8d4
DCS
1641 /* BIOS often leaves RC6 enabled, but disable it for hw init */
1642 intel_sanitize_gt_powersave(dev_priv);
0673ad47 1643
25d140fa 1644 intel_gt_init_workarounds(dev_priv);
0673ad47
CW
1645
1646 /* On the 945G/GM, the chipset reports the MSI capability on the
1647 * integrated graphics even though the support isn't actually there
1648 * according to the published specs. It doesn't appear to function
1649 * correctly in testing on 945G.
1650 * This may be a side effect of MSI having been made available for PEG
1651 * and the registers being closely associated.
1652 *
1653 * According to chipset errata, on the 965GM, MSI interrupts may
e38c2da0
VS
1654 * be lost or delayed, and was defeatured. MSI interrupts seem to
1655 * get lost on g4x as well, and interrupt delivery seems to stay
1656 * properly dead afterwards. So we'll just disable them for all
1657 * pre-gen5 chipsets.
8a29c778
LDM
1658 *
1659 * dp aux and gmbus irq on gen4 seems to be able to generate legacy
1660 * interrupts even when in MSI mode. This results in spurious
1661 * interrupt warnings if the legacy irq no. is shared with another
1662 * device. The kernel then disables that interrupt source and so
1663 * prevents the other device from working properly.
0673ad47 1664 */
e38c2da0 1665 if (INTEL_GEN(dev_priv) >= 5) {
52a05c30 1666 if (pci_enable_msi(pdev) < 0)
0673ad47
CW
1667 DRM_DEBUG_DRIVER("can't enable MSI");
1668 }
1669
26f837e8
ZW
1670 ret = intel_gvt_init(dev_priv);
1671 if (ret)
7ab87ede
CW
1672 goto err_msi;
1673
1674 intel_opregion_setup(dev_priv);
cbfa59d4
MK
1675 /*
1676 * Fill the dram structure to get the system raw bandwidth and
1677 * dram info. This will be used for memory latency calculation.
1678 */
1679 intel_get_dram_info(dev_priv);
1680
c457d9cf 1681 intel_bw_init_hw(dev_priv);
26f837e8 1682
0673ad47
CW
1683 return 0;
1684
7ab87ede
CW
1685err_msi:
1686 if (pdev->msi_enabled)
1687 pci_disable_msi(pdev);
1688 pm_qos_remove_request(&dev_priv->pm_qos);
9f172f6f 1689err_ggtt:
3b58a945 1690 i915_ggtt_driver_release(dev_priv);
9f172f6f
CW
1691err_perf:
1692 i915_perf_fini(dev_priv);
0673ad47
CW
1693 return ret;
1694}
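/*
 * A minimal, self-contained sketch of the goto-unwind idiom used by
 * i915_driver_hw_probe() above: every failure jumps to a label that
 * releases only what was set up before it, in reverse order. The
 * sketch_* helpers are hypothetical, for illustration only.
 */
static int sketch_setup_a(void) { return 0; }
static void sketch_teardown_a(void) { }
static int sketch_setup_b(void) { return 0; }

static int sketch_probe(void)
{
	int ret;

	ret = sketch_setup_a();
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = sketch_setup_b();
	if (ret)
		goto err_a;		/* undo step A only */

	return 0;

err_a:
	sketch_teardown_a();
	return ret;
}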
1695
1696/**
78dae1ac 1697 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
0673ad47
CW
1698 * @dev_priv: device private
1699 */
78dae1ac 1700static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
0673ad47 1701{
52a05c30 1702 struct pci_dev *pdev = dev_priv->drm.pdev;
0673ad47 1703
9f9b2792
LL
1704 i915_perf_fini(dev_priv);
1705
52a05c30
DW
1706 if (pdev->msi_enabled)
1707 pci_disable_msi(pdev);
0673ad47
CW
1708
1709 pm_qos_remove_request(&dev_priv->pm_qos);
0673ad47
CW
1710}
1711
1712/**
1713 * i915_driver_register - register the driver with the rest of the system
1714 * @dev_priv: device private
1715 *
1716 * Perform any steps necessary to make the driver available via kernel
1717 * internal or userspace interfaces.
1718 */
1719static void i915_driver_register(struct drm_i915_private *dev_priv)
1720{
91c8a326 1721 struct drm_device *dev = &dev_priv->drm;
0673ad47 1722
c29579d2 1723 i915_gem_driver_register(dev_priv);
b46a33e2 1724 i915_pmu_register(dev_priv);
0673ad47
CW
1725
1726 /*
1727 * Notify a valid surface after modesetting,
1728 * when running inside a VM.
1729 */
1730 if (intel_vgpu_active(dev_priv))
1731 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
1732
1733 /* Reveal our presence to userspace */
1734 if (drm_dev_register(dev, 0) == 0) {
1735 i915_debugfs_register(dev_priv);
694c2828 1736 i915_setup_sysfs(dev_priv);
442b8c06
RB
1737
1738 /* Depends on sysfs having been initialized */
1739 i915_perf_register(dev_priv);
0673ad47
CW
1740 } else
1741 DRM_ERROR("Failed to register driver for userspace access!\n");
1742
e1bf094b 1743 if (HAS_DISPLAY(dev_priv)) {
0673ad47
CW
1744 /* Must be done after probing outputs */
1745 intel_opregion_register(dev_priv);
1746 acpi_video_register();
1747 }
1748
cf819eff 1749 if (IS_GEN(dev_priv, 5))
0673ad47
CW
1750 intel_gpu_ips_init(dev_priv);
1751
eef57324 1752 intel_audio_init(dev_priv);
0673ad47
CW
1753
1754 /*
1755 * Some ports require correctly set-up hpd registers for detection to
1756 * work properly (otherwise reporting a ghost connected status), e.g. VGA
1757 * on gm45. Hence we can only set up the initial fbdev config after hpd
1758 * irqs are fully enabled. We do it last so that the async config
1759 * cannot run before the connectors are registered.
1760 */
1761 intel_fbdev_initial_config_async(dev);
448aa911
CW
1762
1763 /*
1764 * We need to coordinate the hotplugs with the asynchronous fbdev
1765 * configuration, for which we use the fbdev->async_cookie.
1766 */
e1bf094b 1767 if (HAS_DISPLAY(dev_priv))
448aa911 1768 drm_kms_helper_poll_init(dev);
07d80572 1769
2cd9a689 1770 intel_power_domains_enable(dev_priv);
69c66355 1771 intel_runtime_pm_enable(&dev_priv->runtime_pm);
0673ad47
CW
1772}
1773
1774/**
1775 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
1776 * @dev_priv: device private
1777 */
1778static void i915_driver_unregister(struct drm_i915_private *dev_priv)
1779{
69c66355 1780 intel_runtime_pm_disable(&dev_priv->runtime_pm);
2cd9a689 1781 intel_power_domains_disable(dev_priv);
07d80572 1782
4f256d82 1783 intel_fbdev_unregister(dev_priv);
eef57324 1784 intel_audio_deinit(dev_priv);
0673ad47 1785
448aa911
CW
1786 /*
1787 * After flushing the fbdev (incl. a late async config which will
1788 * have delayed queuing of a hotplug event), then flush the hotplug
1789 * events.
1790 */
1791 drm_kms_helper_poll_fini(&dev_priv->drm);
1792
0673ad47
CW
1793 intel_gpu_ips_teardown();
1794 acpi_video_unregister();
1795 intel_opregion_unregister(dev_priv);
1796
442b8c06 1797 i915_perf_unregister(dev_priv);
b46a33e2 1798 i915_pmu_unregister(dev_priv);
442b8c06 1799
694c2828 1800 i915_teardown_sysfs(dev_priv);
d69990e0 1801 drm_dev_unplug(&dev_priv->drm);
0673ad47 1802
c29579d2 1803 i915_gem_driver_unregister(dev_priv);
0673ad47
CW
1804}
1805
27d558a1
MW
1806static void i915_welcome_messages(struct drm_i915_private *dev_priv)
1807{
1808 if (drm_debug & DRM_UT_DRIVER) {
1809 struct drm_printer p = drm_debug_printer("i915 device info:");
1810
805446c8 1811 drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
1787a984
JN
1812 INTEL_DEVID(dev_priv),
1813 INTEL_REVID(dev_priv),
1814 intel_platform_name(INTEL_INFO(dev_priv)->platform),
805446c8
TU
1815 intel_subplatform(RUNTIME_INFO(dev_priv),
1816 INTEL_INFO(dev_priv)->platform),
1787a984
JN
1817 INTEL_GEN(dev_priv));
1818
1819 intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
0258404f 1820 intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
27d558a1
MW
1821 }
1822
1823 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
1824 DRM_INFO("DRM_I915_DEBUG enabled\n");
1825 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
1826 DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
6dfc4a8f
ID
1827 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
1828 DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n");
27d558a1
MW
1829}
1830
55ac5a16
CW
1831static struct drm_i915_private *
1832i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
1833{
1834 const struct intel_device_info *match_info =
1835 (struct intel_device_info *)ent->driver_data;
1836 struct intel_device_info *device_info;
1837 struct drm_i915_private *i915;
2ddcc982 1838 int err;
55ac5a16
CW
1839
1840 i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
1841 if (!i915)
2ddcc982 1842 return ERR_PTR(-ENOMEM);
55ac5a16 1843
2ddcc982
AS
1844 err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
1845 if (err) {
55ac5a16 1846 kfree(i915);
2ddcc982 1847 return ERR_PTR(err);
55ac5a16
CW
1848 }
1849
55ac5a16 1850 i915->drm.dev_private = i915;
361f9dc2
CW
1851
1852 i915->drm.pdev = pdev;
1853 pci_set_drvdata(pdev, i915);
55ac5a16
CW
1854
1855 /* Setup the write-once "constant" device info */
1856 device_info = mkwrite_device_info(i915);
1857 memcpy(device_info, match_info, sizeof(*device_info));
0258404f 1858 RUNTIME_INFO(i915)->device_id = pdev->device;
55ac5a16 1859
74f6e183 1860 BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
55ac5a16
CW
1861
1862 return i915;
1863}
1864
31962ca6
CW
1865static void i915_driver_destroy(struct drm_i915_private *i915)
1866{
1867 struct pci_dev *pdev = i915->drm.pdev;
1868
1869 drm_dev_fini(&i915->drm);
1870 kfree(i915);
1871
1872 /* And make sure we never chase our dangling pointer from pci_dev */
1873 pci_set_drvdata(pdev, NULL);
1874}
1875
0673ad47 1876/**
b01558e5 1877 * i915_driver_probe - setup chip and create an initial config
d2ad3ae4
JL
1878 * @pdev: PCI device
1879 * @ent: matching PCI ID entry
0673ad47 1880 *
b01558e5 1881 * The driver probe routine has to do several things:
0673ad47
CW
1882 * - drive output discovery via intel_modeset_init()
1883 * - initialize the memory manager
1884 * - allocate initial config memory
1885 * - setup the DRM framebuffer with the allocated memory
1886 */
b01558e5 1887int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
0673ad47 1888{
8d2b47dd
ML
1889 const struct intel_device_info *match_info =
1890 (struct intel_device_info *)ent->driver_data;
0673ad47
CW
1891 struct drm_i915_private *dev_priv;
1892 int ret;
7d87a7f7 1893
55ac5a16 1894 dev_priv = i915_driver_create(pdev, ent);
2ddcc982
AS
1895 if (IS_ERR(dev_priv))
1896 return PTR_ERR(dev_priv);
719388e1 1897
1feb64c4
VS
1898 /* Disable nuclear pageflip by default on pre-ILK */
1899 if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
1900 dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
1901
0673ad47
CW
1902 ret = pci_enable_device(pdev);
1903 if (ret)
cad3688f 1904 goto out_fini;
1347f5b4 1905
0b61b8b0 1906 ret = i915_driver_early_probe(dev_priv);
0673ad47
CW
1907 if (ret < 0)
1908 goto out_pci_disable;
ef11bdb3 1909
9102650f 1910 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1da177e4 1911
9e138ea1
DCS
1912 i915_detect_vgpu(dev_priv);
1913
0b61b8b0 1914 ret = i915_driver_mmio_probe(dev_priv);
0673ad47
CW
1915 if (ret < 0)
1916 goto out_runtime_pm_put;
79e53945 1917
0b61b8b0 1918 ret = i915_driver_hw_probe(dev_priv);
0673ad47
CW
1919 if (ret < 0)
1920 goto out_cleanup_mmio;
30c964a6 1921
0b61b8b0 1922 ret = i915_driver_modeset_probe(&dev_priv->drm);
0673ad47 1923 if (ret < 0)
baf54385 1924 goto out_cleanup_hw;
0673ad47
CW
1925
1926 i915_driver_register(dev_priv);
1927
9102650f 1928 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
0673ad47 1929
27d558a1
MW
1930 i915_welcome_messages(dev_priv);
1931
0673ad47
CW
1932 return 0;
1933
0673ad47 1934out_cleanup_hw:
78dae1ac 1935 i915_driver_hw_remove(dev_priv);
3b58a945 1936 i915_ggtt_driver_release(dev_priv);
19e0a8d4
DCS
1937
1938 /* Paranoia: make sure we have disabled everything before we exit. */
1939 intel_sanitize_gt_powersave(dev_priv);
0673ad47 1940out_cleanup_mmio:
3b58a945 1941 i915_driver_mmio_release(dev_priv);
0673ad47 1942out_runtime_pm_put:
9102650f 1943 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3b58a945 1944 i915_driver_late_release(dev_priv);
0673ad47
CW
1945out_pci_disable:
1946 pci_disable_device(pdev);
cad3688f 1947out_fini:
f2db53f1 1948 i915_probe_error(dev_priv, "Device initialization failed (%d)\n", ret);
31962ca6 1949 i915_driver_destroy(dev_priv);
30c964a6
RB
1950 return ret;
1951}
1952
361f9dc2 1953void i915_driver_remove(struct drm_i915_private *i915)
3bad0781 1954{
361f9dc2 1955 struct pci_dev *pdev = i915->drm.pdev;
3bad0781 1956
361f9dc2 1957 disable_rpm_wakeref_asserts(&i915->runtime_pm);
07d80572 1958
361f9dc2 1959 i915_driver_unregister(i915);
99c539be 1960
141f3767
JK
1961 /*
1962 * After unregistering the device to prevent any new users, cancel
1963 * all in-flight requests so that we can quickly unbind the active
1964 * resources.
1965 */
361f9dc2 1966 intel_gt_set_wedged(&i915->gt);
141f3767 1967
4a8ab5ea
CW
1968 /* Flush any external code that still may be under the RCU lock */
1969 synchronize_rcu();
1970
361f9dc2 1971 i915_gem_suspend(i915);
ce1bb329 1972
361f9dc2 1973 drm_atomic_helper_shutdown(&i915->drm);
a667fb40 1974
361f9dc2 1975 intel_gvt_driver_remove(i915);
26f837e8 1976
361f9dc2 1977 intel_modeset_driver_remove(&i915->drm);
0673ad47 1978
361f9dc2 1979 intel_bios_driver_remove(i915);
3bad0781 1980
52a05c30
DW
1981 vga_switcheroo_unregister_client(pdev);
1982 vga_client_register(pdev, NULL, NULL, NULL);
bcdb72ac 1983
361f9dc2 1984 intel_csr_ucode_fini(i915);
bcdb72ac 1985
0673ad47 1986 /* Free error state after interrupts are fully disabled. */
361f9dc2
CW
1987 cancel_delayed_work_sync(&i915->gt.hangcheck.work);
1988 i915_reset_error_state(i915);
0673ad47 1989
361f9dc2 1990 i915_gem_driver_remove(i915);
0673ad47 1991
361f9dc2 1992 intel_power_domains_driver_remove(i915);
0673ad47 1993
361f9dc2 1994 i915_driver_hw_remove(i915);
0673ad47 1995
361f9dc2 1996 enable_rpm_wakeref_asserts(&i915->runtime_pm);
cad3688f
CW
1997}
1998
1999static void i915_driver_release(struct drm_device *dev)
2000{
2001 struct drm_i915_private *dev_priv = to_i915(dev);
69c66355 2002 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
0673ad47 2003
69c66355 2004 disable_rpm_wakeref_asserts(rpm);
47bc28d7 2005
3b58a945 2006 i915_gem_driver_release(dev_priv);
47bc28d7 2007
3b58a945 2008 i915_ggtt_driver_release(dev_priv);
19e0a8d4
DCS
2009
2010 /* Paranoia: make sure we have disabled everything before we exit. */
2011 intel_sanitize_gt_powersave(dev_priv);
2012
3b58a945 2013 i915_driver_mmio_release(dev_priv);
47bc28d7 2014
69c66355 2015 enable_rpm_wakeref_asserts(rpm);
3b58a945 2016 intel_runtime_pm_driver_release(rpm);
47bc28d7 2017
3b58a945 2018 i915_driver_late_release(dev_priv);
31962ca6 2019 i915_driver_destroy(dev_priv);
3bad0781
ZW
2020}
2021
0673ad47 2022static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
2911a35b 2023{
829a0af2 2024 struct drm_i915_private *i915 = to_i915(dev);
0673ad47 2025 int ret;
2911a35b 2026
829a0af2 2027 ret = i915_gem_open(i915, file);
0673ad47
CW
2028 if (ret)
2029 return ret;
2911a35b 2030
0673ad47
CW
2031 return 0;
2032}
71386ef9 2033
0673ad47
CW
2034/**
2035 * i915_driver_lastclose - clean up after all DRM clients have exited
2036 * @dev: DRM device
2037 *
2038 * Take care of cleaning up after all DRM clients have exited. In the
2039 * mode setting case, we want to restore the kernel's initial mode (just
2040 * in case the last client left us in a bad state).
2041 *
2042 * Additionally, in the non-mode setting case, we'll tear down the GTT
2043 * and DMA structures, since the kernel won't be using them, and clean
2044 * up any GEM state.
2045 */
2046static void i915_driver_lastclose(struct drm_device *dev)
2047{
2048 intel_fbdev_restore_mode(dev);
2049 vga_switcheroo_process_delayed_switch();
2050}
2911a35b 2051
7d2ec881 2052static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
0673ad47 2053{
7d2ec881
DV
2054 struct drm_i915_file_private *file_priv = file->driver_priv;
2055
0673ad47 2056 mutex_lock(&dev->struct_mutex);
829a0af2 2057 i915_gem_context_close(file);
0673ad47
CW
2058 i915_gem_release(dev, file);
2059 mutex_unlock(&dev->struct_mutex);
0673ad47
CW
2060
2061 kfree(file_priv);
515b8b7e
CW
2062
2063 /* Catch up with all the deferred frees from "this" client */
2064 i915_gem_flush_free_objects(to_i915(dev));
2911a35b
BW
2065}
2066
07f9cd0b
ID
2067static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
2068{
91c8a326 2069 struct drm_device *dev = &dev_priv->drm;
19c8054c 2070 struct intel_encoder *encoder;
07f9cd0b
ID
2071
2072 drm_modeset_lock_all(dev);
19c8054c
JN
2073 for_each_intel_encoder(dev, encoder)
2074 if (encoder->suspend)
2075 encoder->suspend(encoder);
07f9cd0b
ID
2076 drm_modeset_unlock_all(dev);
2077}
2078
1a5df187
PZ
2079static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
2080 bool rpm_resume);
507e126e 2081static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
f75a1985 2082
bc87229f
ID
2083static bool suspend_to_idle(struct drm_i915_private *dev_priv)
2084{
2085#if IS_ENABLED(CONFIG_ACPI_SLEEP)
2086 if (acpi_target_system_state() < ACPI_STATE_S3)
2087 return true;
2088#endif
2089 return false;
2090}
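/*
 * Note: acpi_target_system_state() < ACPI_STATE_S3 means the target is
 * S0/S1/S2, i.e. suspend-to-idle rather than a real S3 entry; the
 * suspend path below uses this to pick PCI_D1 over PCI_D3cold for the
 * opregion notification.
 */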
ebc32824 2091
73b66f87
CW
2092static int i915_drm_prepare(struct drm_device *dev)
2093{
2094 struct drm_i915_private *i915 = to_i915(dev);
73b66f87
CW
2095
2096 /*
2097 * NB intel_display_suspend() may issue new requests after we've
2098 * ostensibly marked the GPU as ready-to-sleep here. We need to
2099 * split out that work and pull it forward so that after that point,
2100 * the GPU is not woken again.
2101 */
5861b013 2102 i915_gem_suspend(i915);
73b66f87 2103
5861b013 2104 return 0;
73b66f87
CW
2105}
2106
5e365c39 2107static int i915_drm_suspend(struct drm_device *dev)
ba8bbcf6 2108{
fac5e23e 2109 struct drm_i915_private *dev_priv = to_i915(dev);
52a05c30 2110 struct pci_dev *pdev = dev_priv->drm.pdev;
e5747e3a 2111 pci_power_t opregion_target_state;
61caf87c 2112
9102650f 2113 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1f814dac 2114
c67a470b
PZ
2115 /* We do a lot of poking in a lot of registers; make sure they work
2116 * properly. */
2cd9a689 2117 intel_power_domains_disable(dev_priv);
cb10799c 2118
5bcf719b
DA
2119 drm_kms_helper_poll_disable(dev);
2120
52a05c30 2121 pci_save_state(pdev);
ba8bbcf6 2122
6b72d486 2123 intel_display_suspend(dev);
2eb5252e 2124
1a4313d1 2125 intel_dp_mst_suspend(dev_priv);
7d708ee4 2126
d5818938
DV
2127 intel_runtime_pm_disable_interrupts(dev_priv);
2128 intel_hpd_cancel_work(dev_priv);
09b64267 2129
d5818938 2130 intel_suspend_encoders(dev_priv);
0e32b39c 2131
712bf364 2132 intel_suspend_hw(dev_priv);
5669fcac 2133
275a991c 2134 i915_gem_suspend_gtt_mappings(dev_priv);
828c7908 2135
af6dc742 2136 i915_save_state(dev_priv);
9e06dd39 2137
bc87229f 2138 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
a950adc6 2139 intel_opregion_suspend(dev_priv, opregion_target_state);
8ee1c3db 2140
82e3b8c1 2141 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
3fa016a0 2142
62d5d69b
MK
2143 dev_priv->suspend_count++;
2144
f74ed08d 2145 intel_csr_ucode_suspend(dev_priv);
f514c2d8 2146
9102650f 2147 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1f814dac 2148
73b66f87 2149 return 0;
84b79f8d
RW
2150}
2151
2cd9a689
ID
2152static enum i915_drm_suspend_mode
2153get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
2154{
2155 if (hibernate)
2156 return I915_DRM_SUSPEND_HIBERNATE;
2157
2158 if (suspend_to_idle(dev_priv))
2159 return I915_DRM_SUSPEND_IDLE;
2160
2161 return I915_DRM_SUSPEND_MEM;
2162}
2163
c49d13ee 2164static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
c3c09c95 2165{
c49d13ee 2166 struct drm_i915_private *dev_priv = to_i915(dev);
52a05c30 2167 struct pci_dev *pdev = dev_priv->drm.pdev;
69c66355 2168 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
c3c09c95
ID
2169 int ret;
2170
69c66355 2171 disable_rpm_wakeref_asserts(rpm);
1f814dac 2172
ec92ad00
CW
2173 i915_gem_suspend_late(dev_priv);
2174
f7de5027 2175 intel_uncore_suspend(&dev_priv->uncore);
4c494a57 2176
2cd9a689
ID
2177 intel_power_domains_suspend(dev_priv,
2178 get_suspend_mode(dev_priv, hibernation));
73dfc227 2179
507e126e 2180 ret = 0;
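	/* Pick the platform's deepest suspend hook: DC9 on gen11+ and
	 * GEN9 LP, PC8 on HSW/BDW, or the VLV/CHV S0ix save sequence.
	 */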
3b6ac43b 2181 if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv))
507e126e 2182 bxt_enable_dc9(dev_priv);
b8aea3d1 2183 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
507e126e
ID
2184 hsw_enable_pc8(dev_priv);
2185 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2186 ret = vlv_suspend_complete(dev_priv);
c3c09c95
ID
2187
2188 if (ret) {
2189 DRM_ERROR("Suspend complete failed: %d\n", ret);
2cd9a689 2190 intel_power_domains_resume(dev_priv);
c3c09c95 2191
1f814dac 2192 goto out;
c3c09c95
ID
2193 }
2194
52a05c30 2195 pci_disable_device(pdev);
ab3be73f 2196 /*
54875571 2197 * During hibernation on some platforms the BIOS may try to access
ab3be73f
ID
2198 * the device even though it's already in D3 and hang the machine. So
2199 * leave the device in D0 on those platforms and hope the BIOS will
54875571
ID
2200 * power down the device properly. The issue was seen on multiple old
2201 * GENs with different BIOS vendors, so having an explicit blacklist
2202 * is impractical; apply the workaround on everything pre-GEN6. The
2203 * platforms where the issue was seen:
2204 * Lenovo Thinkpad X301, X61s, X60, T60, X41
2205 * Fujitsu FSC S7110
2206 * Acer Aspire 1830T
ab3be73f 2207 */
514e1d64 2208 if (!(hibernation && INTEL_GEN(dev_priv) < 6))
52a05c30 2209 pci_set_power_state(pdev, PCI_D3hot);
c3c09c95 2210
1f814dac 2211out:
69c66355 2212 enable_rpm_wakeref_asserts(rpm);
bd780f37 2213 if (!dev_priv->uncore.user_forcewake.count)
3b58a945 2214 intel_runtime_pm_driver_release(rpm);
1f814dac
ID
2215
2216 return ret;
c3c09c95
ID
2217}
2218
361f9dc2
CW
2219static int
2220i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
84b79f8d
RW
2221{
2222 int error;
2223
0b14cbd2
ID
2224 if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
2225 state.event != PM_EVENT_FREEZE))
2226 return -EINVAL;
5bcf719b 2227
361f9dc2 2228 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
5bcf719b 2229 return 0;
6eecba33 2230
361f9dc2 2231 error = i915_drm_suspend(&i915->drm);
84b79f8d
RW
2232 if (error)
2233 return error;
2234
361f9dc2 2235 return i915_drm_suspend_late(&i915->drm, false);
ba8bbcf6
JB
2236}
2237
5e365c39 2238static int i915_drm_resume(struct drm_device *dev)
76c4b250 2239{
fac5e23e 2240 struct drm_i915_private *dev_priv = to_i915(dev);
ac840ae5 2241 int ret;
9d49c0ef 2242
9102650f 2243 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
abc80abd 2244 intel_sanitize_gt_powersave(dev_priv);
1f814dac 2245
1288786b
CW
2246 i915_gem_sanitize(dev_priv);
2247
97d6d7ab 2248 ret = i915_ggtt_enable_hw(dev_priv);
ac840ae5
VS
2249 if (ret)
2250 DRM_ERROR("failed to re-enable GGTT\n");
2251
f74ed08d
ID
2252 intel_csr_ucode_resume(dev_priv);
2253
af6dc742 2254 i915_restore_state(dev_priv);
8090ba8c 2255 intel_pps_unlock_regs_wa(dev_priv);
61caf87c 2256
c39055b0 2257 intel_init_pch_refclk(dev_priv);
1833b134 2258
364aece0
PA
2259 /*
2260 * Interrupts have to be enabled before any batches are run. If not the
2261 * GPU will hang. i915_gem_init_hw() will initiate batches to
2262 * update/restore the context.
2263 *
908764f6
ID
2264 * drm_mode_config_reset() needs AUX interrupts.
2265 *
364aece0
PA
2266 * Modeset enabling in intel_modeset_init_hw() also needs working
2267 * interrupts.
2268 */
2269 intel_runtime_pm_enable_interrupts(dev_priv);
2270
908764f6
ID
2271 drm_mode_config_reset(dev);
2272
37cd3300 2273 i915_gem_resume(dev_priv);
226485e9 2274
d5818938 2275 intel_modeset_init_hw(dev);
675f7ff3 2276 intel_init_clock_gating(dev_priv);
24576d23 2277
d5818938
DV
2278 spin_lock_irq(&dev_priv->irq_lock);
2279 if (dev_priv->display.hpd_irq_setup)
91d14251 2280 dev_priv->display.hpd_irq_setup(dev_priv);
d5818938 2281 spin_unlock_irq(&dev_priv->irq_lock);
0e32b39c 2282
1a4313d1 2283 intel_dp_mst_resume(dev_priv);
e7d6f7d7 2284
a16b7658
L
2285 intel_display_resume(dev);
2286
e0b70061
L
2287 drm_kms_helper_poll_enable(dev);
2288
d5818938
DV
2289 /*
2290 * ... but also need to make sure that hotplug processing
2291 * doesn't cause havoc. Like in the driver load code we don't
c444ad79 2292 * bother with the tiny race here where we might lose hotplug
d5818938
DV
2293 * notifications.
2294 */
2295 intel_hpd_init(dev_priv);
1daed3fb 2296
a950adc6 2297 intel_opregion_resume(dev_priv);
44834a67 2298
82e3b8c1 2299 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
073f34d9 2300
2cd9a689
ID
2301 intel_power_domains_enable(dev_priv);
2302
9102650f 2303 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1f814dac 2304
074c6ada 2305 return 0;
84b79f8d
RW
2306}
2307
5e365c39 2308static int i915_drm_resume_early(struct drm_device *dev)
84b79f8d 2309{
fac5e23e 2310 struct drm_i915_private *dev_priv = to_i915(dev);
52a05c30 2311 struct pci_dev *pdev = dev_priv->drm.pdev;
44410cd0 2312 int ret;
36d61e67 2313
76c4b250
ID
2314 /*
2315 * We have a resume ordering issue with the snd-hda driver also
2316 * requiring our device to be power up. Due to the lack of a
2317 * parent/child relationship we currently solve this with an early
2318 * resume hook.
2319 *
2320 * FIXME: This should be solved with a special hdmi sink device or
2321 * similar so that power domains can be employed.
2322 */
44410cd0
ID
2323
2324 /*
2325 * Note that we need to set the power state explicitly, since we
2326 * powered off the device during freeze and the PCI core won't power
2327 * it back up for us during thaw. Powering off the device during
2328 * freeze is not a hard requirement though, and during the
2329 * suspend/resume phases the PCI core makes sure we get here with the
2330 * device powered on. So in case we change our freeze logic and keep
2331 * the device powered we can also remove the following set power state
2332 * call.
2333 */
52a05c30 2334 ret = pci_set_power_state(pdev, PCI_D0);
44410cd0
ID
2335 if (ret) {
2336 DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
2cd9a689 2337 return ret;
44410cd0
ID
2338 }
2339
2340 /*
2341 * Note that pci_enable_device() first enables any parent bridge
2342 * device and only then sets the power state for this device. The
2343 * bridge enabling is a nop though, since bridge devices are resumed
2344 * first. The order of enabling power and enabling the device is
2345 * imposed by the PCI core as described above, so here we preserve the
2346 * same order for the freeze/thaw phases.
2347 *
2348 * TODO: eventually we should remove pci_disable_device() /
2349 * pci_enable_device() from suspend/resume. Due to how they
2350 * depend on the device enable refcount we can't anyway depend on them
2351 * disabling/enabling the device.
2352 */
2cd9a689
ID
2353 if (pci_enable_device(pdev))
2354 return -EIO;
84b79f8d 2355
52a05c30 2356 pci_set_master(pdev);
84b79f8d 2357
9102650f 2358 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1f814dac 2359
666a4537 2360 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1a5df187 2361 ret = vlv_resume_prepare(dev_priv, false);
36d61e67 2362 if (ret)
ff0b187f
DL
2363 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
2364 ret);
36d61e67 2365
f7de5027
DCS
2366 intel_uncore_resume_early(&dev_priv->uncore);
2367
eaf522f6 2368 intel_gt_check_and_clear_faults(&dev_priv->gt);
efee833a 2369
3e68928b 2370 if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
0f90603c 2371 gen9_sanitize_dc_state(dev_priv);
507e126e 2372 bxt_disable_dc9(dev_priv);
da2f41d1 2373 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
a9a6b73a 2374 hsw_disable_pc8(dev_priv);
da2f41d1 2375 }
efee833a 2376
19e0a8d4 2377 intel_sanitize_gt_powersave(dev_priv);
bc87229f 2378
2cd9a689 2379 intel_power_domains_resume(dev_priv);
bc87229f 2380
0c91621c 2381 intel_gt_sanitize(&dev_priv->gt, true);
4fdd5b4e 2382
9102650f 2383 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
6e35e8ab 2384
36d61e67 2385 return ret;
76c4b250
ID
2386}
2387
361f9dc2 2388static int i915_resume_switcheroo(struct drm_i915_private *i915)
76c4b250 2389{
50a0072f 2390 int ret;
76c4b250 2391
361f9dc2 2392 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
097dd837
ID
2393 return 0;
2394
361f9dc2 2395 ret = i915_drm_resume_early(&i915->drm);
50a0072f
ID
2396 if (ret)
2397 return ret;
2398
361f9dc2 2399 return i915_drm_resume(&i915->drm);
5a17514e
ID
2400}
2401
73b66f87
CW
2402static int i915_pm_prepare(struct device *kdev)
2403{
361f9dc2 2404 struct drm_i915_private *i915 = kdev_to_i915(kdev);
73b66f87 2405
361f9dc2 2406 if (!i915) {
73b66f87
CW
2407 dev_err(kdev, "DRM not initialized, aborting suspend.\n");
2408 return -ENODEV;
2409 }
2410
361f9dc2 2411 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
73b66f87
CW
2412 return 0;
2413
361f9dc2 2414 return i915_drm_prepare(&i915->drm);
73b66f87
CW
2415}
2416
c49d13ee 2417static int i915_pm_suspend(struct device *kdev)
112b715e 2418{
361f9dc2 2419 struct drm_i915_private *i915 = kdev_to_i915(kdev);
112b715e 2420
361f9dc2 2421 if (!i915) {
c49d13ee 2422 dev_err(kdev, "DRM not initialized, aborting suspend.\n");
84b79f8d
RW
2423 return -ENODEV;
2424 }
112b715e 2425
361f9dc2 2426 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
5bcf719b
DA
2427 return 0;
2428
361f9dc2 2429 return i915_drm_suspend(&i915->drm);
76c4b250
ID
2430}
2431
c49d13ee 2432static int i915_pm_suspend_late(struct device *kdev)
76c4b250 2433{
361f9dc2 2434 struct drm_i915_private *i915 = kdev_to_i915(kdev);
76c4b250
ID
2435
2436 /*
c965d995 2437 * We have a suspend ordering issue with the snd-hda driver also
76c4b250
ID
2438 * requiring our device to be powered up. Due to the lack of a
2439 * parent/child relationship we currently solve this with a late
2440 * suspend hook.
2441 *
2442 * FIXME: This should be solved with a special hdmi sink device or
2443 * similar so that power domains can be employed.
2444 */
361f9dc2 2445 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
76c4b250 2446 return 0;
112b715e 2447
361f9dc2 2448 return i915_drm_suspend_late(&i915->drm, false);
ab3be73f
ID
2449}
2450
c49d13ee 2451static int i915_pm_poweroff_late(struct device *kdev)
ab3be73f 2452{
361f9dc2 2453 struct drm_i915_private *i915 = kdev_to_i915(kdev);
ab3be73f 2454
361f9dc2 2455 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
ab3be73f
ID
2456 return 0;
2457
361f9dc2 2458 return i915_drm_suspend_late(&i915->drm, true);
cbda12d7
ZW
2459}
2460
c49d13ee 2461static int i915_pm_resume_early(struct device *kdev)
76c4b250 2462{
361f9dc2 2463 struct drm_i915_private *i915 = kdev_to_i915(kdev);
76c4b250 2464
361f9dc2 2465 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
097dd837
ID
2466 return 0;
2467
361f9dc2 2468 return i915_drm_resume_early(&i915->drm);
76c4b250
ID
2469}
2470
c49d13ee 2471static int i915_pm_resume(struct device *kdev)
cbda12d7 2472{
361f9dc2 2473 struct drm_i915_private *i915 = kdev_to_i915(kdev);
84b79f8d 2474
361f9dc2 2475 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
097dd837
ID
2476 return 0;
2477
361f9dc2 2478 return i915_drm_resume(&i915->drm);
cbda12d7
ZW
2479}
2480
1f19ac2a 2481/* freeze: before creating the hibernation_image */
c49d13ee 2482static int i915_pm_freeze(struct device *kdev)
1f19ac2a 2483{
361f9dc2 2484 struct drm_i915_private *i915 = kdev_to_i915(kdev);
6a800eab
CW
2485 int ret;
2486
361f9dc2
CW
2487 if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
2488 ret = i915_drm_suspend(&i915->drm);
dd9f31c7
ID
2489 if (ret)
2490 return ret;
2491 }
6a800eab 2492
361f9dc2 2493 ret = i915_gem_freeze(i915);
6a800eab
CW
2494 if (ret)
2495 return ret;
2496
2497 return 0;
1f19ac2a
CW
2498}
2499
c49d13ee 2500static int i915_pm_freeze_late(struct device *kdev)
1f19ac2a 2501{
361f9dc2 2502 struct drm_i915_private *i915 = kdev_to_i915(kdev);
461fb99c
CW
2503 int ret;
2504
361f9dc2
CW
2505 if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
2506 ret = i915_drm_suspend_late(&i915->drm, true);
dd9f31c7
ID
2507 if (ret)
2508 return ret;
2509 }
461fb99c 2510
361f9dc2 2511 ret = i915_gem_freeze_late(i915);
461fb99c
CW
2512 if (ret)
2513 return ret;
2514
2515 return 0;
1f19ac2a
CW
2516}
2517
2518/* thaw: called after creating the hibernation image, but before turning off. */
c49d13ee 2519static int i915_pm_thaw_early(struct device *kdev)
1f19ac2a 2520{
c49d13ee 2521 return i915_pm_resume_early(kdev);
1f19ac2a
CW
2522}
2523
c49d13ee 2524static int i915_pm_thaw(struct device *kdev)
1f19ac2a 2525{
c49d13ee 2526 return i915_pm_resume(kdev);
1f19ac2a
CW
2527}
2528
2529/* restore: called after loading the hibernation image. */
c49d13ee 2530static int i915_pm_restore_early(struct device *kdev)
1f19ac2a 2531{
c49d13ee 2532 return i915_pm_resume_early(kdev);
1f19ac2a
CW
2533}
2534
c49d13ee 2535static int i915_pm_restore(struct device *kdev)
1f19ac2a 2536{
c49d13ee 2537 return i915_pm_resume(kdev);
1f19ac2a
CW
2538}
2539
ddeea5b0
ID
2540/*
2541 * Save all Gunit registers that may be lost after a D3 and a subsequent
2542 * S0i[R123] transition. The list of registers needing a save/restore is
2543 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
2544 * registers in the following way:
2545 * - Driver: saved/restored by the driver
2546 * - Punit : saved/restored by the Punit firmware
2547 * - No, w/o marking: no need to save/restore, since the register is R/O or
2548 * used internally by the HW in a way that doesn't depend
2549 * on keeping the content across a suspend/resume.
2550 * - Debug : used for debugging
2551 *
2552 * We save/restore all registers marked with 'Driver', with the following
2553 * exceptions:
2554 * - Registers out of use, including also registers marked with 'Debug'.
2555 * These have no effect on the driver's operation, so we don't save/restore
2556 * them to reduce the overhead.
2557 * - Registers that are fully setup by an initialization function called from
2558 * the resume path. For example many clock gating and RPS/RC6 registers.
2559 * - Registers that provide the right functionality with their reset defaults.
2560 *
2561 * TODO: Except for registers that, based on the above 3 criteria, can be safely
2562 * ignored, we save/restore all others, practically treating the HW context as
2563 * a black-box for the driver. Further investigation is needed to reduce the
2564 * saved/restored registers even further, by following the same 3 criteria.
2565 */
2566static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
2567{
2568 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
2569 int i;
2570
2571 /* GAM 0x4000-0x4770 */
2572 s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
2573 s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
2574 s->arb_mode = I915_READ(ARB_MODE);
2575 s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
2576 s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
2577
2578 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
22dfe79f 2579 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
ddeea5b0
ID
2580
2581 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
b5f1c97f 2582 s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
ddeea5b0
ID
2583
2584 s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
2585 s->ecochk = I915_READ(GAM_ECOCHK);
2586 s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
2587 s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
2588
2589 s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
2590
2591 /* MBC 0x9024-0x91D0, 0x8500 */
2592 s->g3dctl = I915_READ(VLV_G3DCTL);
2593 s->gsckgctl = I915_READ(VLV_GSCKGCTL);
2594 s->mbctl = I915_READ(GEN6_MBCTL);
2595
2596 /* GCP 0x9400-0x9424, 0x8100-0x810C */
2597 s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
2598 s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
2599 s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
2600 s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
2601 s->rstctl = I915_READ(GEN6_RSTCTL);
2602 s->misccpctl = I915_READ(GEN7_MISCCPCTL);
2603
2604 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
2605 s->gfxpause = I915_READ(GEN6_GFXPAUSE);
2606 s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
2607 s->rpdeuc = I915_READ(GEN6_RPDEUC);
2608 s->ecobus = I915_READ(ECOBUS);
2609 s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
2610 s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
2611 s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
2612 s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
2613 s->rcedata = I915_READ(VLV_RCEDATA);
2614 s->spare2gh = I915_READ(VLV_SPAREG2H);
2615
2616 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
2617 s->gt_imr = I915_READ(GTIMR);
2618 s->gt_ier = I915_READ(GTIER);
2619 s->pm_imr = I915_READ(GEN6_PMIMR);
2620 s->pm_ier = I915_READ(GEN6_PMIER);
2621
2622 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
22dfe79f 2623 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
ddeea5b0
ID
2624
2625 /* GT SA CZ domain, 0x100000-0x138124 */
2626 s->tilectl = I915_READ(TILECTL);
2627 s->gt_fifoctl = I915_READ(GTFIFOCTL);
2628 s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
2629 s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2630 s->pmwgicz = I915_READ(VLV_PMWGICZ);
2631
2632 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
2633 s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
2634 s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
9c25210f 2635 s->pcbr = I915_READ(VLV_PCBR);
ddeea5b0
ID
2636 s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
2637
2638 /*
2639 * Not saving any of:
2640 * DFT, 0x9800-0x9EC0
2641 * SARB, 0xB000-0xB1FC
2642 * GAC, 0x5208-0x524C, 0x14000-0x14C000
2643 * PCI CFG
2644 */
2645}
2646
2647static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
2648{
2649 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
2650 u32 val;
2651 int i;
2652
2653 /* GAM 0x4000-0x4770 */
2654 I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
2655 I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
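	/* ARB_MODE is a masked register: the upper 16 bits select which
	 * of the lower 16 bits actually get written, hence the 0xffff
	 * write-enable mask when restoring the whole saved value.
	 */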
2656 I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
2657 I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
2658 I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
2659
2660 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
22dfe79f 2661 I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
ddeea5b0
ID
2662
2663 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
b5f1c97f 2664 I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
ddeea5b0
ID
2665
2666 I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
2667 I915_WRITE(GAM_ECOCHK, s->ecochk);
2668 I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
2669 I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
2670
2671 I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
2672
2673 /* MBC 0x9024-0x91D0, 0x8500 */
2674 I915_WRITE(VLV_G3DCTL, s->g3dctl);
2675 I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
2676 I915_WRITE(GEN6_MBCTL, s->mbctl);
2677
2678 /* GCP 0x9400-0x9424, 0x8100-0x810C */
2679 I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
2680 I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
2681 I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
2682 I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
2683 I915_WRITE(GEN6_RSTCTL, s->rstctl);
2684 I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
2685
2686 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
2687 I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
2688 I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
2689 I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
2690 I915_WRITE(ECOBUS, s->ecobus);
2691 I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
2692 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
2693 I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
2694 I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
2695 I915_WRITE(VLV_RCEDATA, s->rcedata);
2696 I915_WRITE(VLV_SPAREG2H, s->spare2gh);
2697
2698 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
2699 I915_WRITE(GTIMR, s->gt_imr);
2700 I915_WRITE(GTIER, s->gt_ier);
2701 I915_WRITE(GEN6_PMIMR, s->pm_imr);
2702 I915_WRITE(GEN6_PMIER, s->pm_ier);
2703
2704 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
22dfe79f 2705 I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
ddeea5b0
ID
2706
2707 /* GT SA CZ domain, 0x100000-0x138124 */
2708 I915_WRITE(TILECTL, s->tilectl);
2709 I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
2710 /*
2711 * Preserve the GT allow wake and GFX force clock bits; they are not
2712 * to be restored, as they are used to control the s0ix suspend/resume
2713 * sequence by the caller.
2714 */
2715 val = I915_READ(VLV_GTLC_WAKE_CTRL);
2716 val &= VLV_GTLC_ALLOWWAKEREQ;
2717 val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
2718 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
2719
2720 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2721 val &= VLV_GFX_CLK_FORCE_ON_BIT;
2722 val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
2723 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
2724
2725 I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
2726
2727 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
2728 I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
2729 I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
9c25210f 2730 I915_WRITE(VLV_PCBR, s->pcbr);
ddeea5b0
ID
2731 I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
2732}
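/*
 * A minimal sketch of the preserve-on-restore pattern used just above
 * for VLV_GTLC_WAKE_CTRL and VLV_GTLC_SURVIVABILITY_REG: caller-owned
 * bits keep their live value, everything else comes from the snapshot.
 * The helper below is hypothetical, for illustration only.
 */
static inline u32 vlv_merge_saved_bits(u32 live, u32 saved, u32 live_mask)
{
	return (live & live_mask) | (saved & ~live_mask);
}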
2733
5a31d30b 2734static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
3dd14c04
CW
2735 u32 mask, u32 val)
2736{
39806c3f
VS
2737 i915_reg_t reg = VLV_GTLC_PW_STATUS;
2738 u32 reg_value;
2739 int ret;
2740
3dd14c04
CW
2741 /* The HW does not like us polling for PW_STATUS frequently, so
2742 * use the sleeping loop rather than risk the busy spin within
2743 * intel_wait_for_register().
2744 *
2745 * Transitioning between RC6 states should be at most 2ms (see
2746 * valleyview_enable_rps) so use a 3ms timeout.
2747 */
5a31d30b
TU
2748 ret = wait_for(((reg_value =
2749 intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
2750 == val, 3);
39806c3f
VS
2751
2752 /* just trace the final value */
2753 trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
2754
2755 return ret;
3dd14c04
CW
2756}
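/*
 * wait_for(COND, ms) in i915 sleeps between polls and returns 0 once
 * COND holds, or -ETIMEDOUT after the timeout expires, which is why it
 * is preferred here over a busy-spinning register wait.
 */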
2757
650ad970
ID
2758int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
2759{
2760 u32 val;
2761 int err;
2762
650ad970
ID
2763 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2764 val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
2765 if (force_on)
2766 val |= VLV_GFX_CLK_FORCE_ON_BIT;
2767 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
2768
2769 if (!force_on)
2770 return 0;
2771
97a04e0d 2772 err = intel_wait_for_register(&dev_priv->uncore,
c6ddc5f3
CW
2773 VLV_GTLC_SURVIVABILITY_REG,
2774 VLV_GFX_CLK_STATUS_BIT,
2775 VLV_GFX_CLK_STATUS_BIT,
2776 20);
650ad970
ID
2777 if (err)
2778 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
2779 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
2780
2781 return err;
650ad970
ID
2782}
2783
ddeea5b0
ID
2784static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
2785{
3dd14c04 2786 u32 mask;
ddeea5b0 2787 u32 val;
3dd14c04 2788 int err;
ddeea5b0
ID
2789
2790 val = I915_READ(VLV_GTLC_WAKE_CTRL);
2791 val &= ~VLV_GTLC_ALLOWWAKEREQ;
2792 if (allow)
2793 val |= VLV_GTLC_ALLOWWAKEREQ;
2794 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
2795 POSTING_READ(VLV_GTLC_WAKE_CTRL);
2796
3dd14c04
CW
2797 mask = VLV_GTLC_ALLOWWAKEACK;
2798 val = allow ? mask : 0;
2799
2800 err = vlv_wait_for_pw_status(dev_priv, mask, val);
ddeea5b0
ID
2801 if (err)
2802 DRM_ERROR("timeout disabling GT waking\n");
b2736695 2803
ddeea5b0 2804 return err;
ddeea5b0
ID
2805}
2806
3dd14c04
CW
2807static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
2808 bool wait_for_on)
ddeea5b0
ID
2809{
2810 u32 mask;
2811 u32 val;
ddeea5b0
ID
2812
2813 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
2814 val = wait_for_on ? mask : 0;
ddeea5b0
ID
2815
2816 /*
2817 * RC6 transitioning can be delayed up to 2 msec (see
2818 * valleyview_enable_rps), so use 3 msec for safety.
e01569ab
CW
2819 *
2820 * This can fail to turn off RC6 if the GPU is stuck after a failed
2821 * reset and we are trying to force the machine to sleep.
ddeea5b0 2822 */
3dd14c04 2823 if (vlv_wait_for_pw_status(dev_priv, mask, val))
e01569ab
CW
2824 DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n",
2825 onoff(wait_for_on));
ddeea5b0
ID
2826}
2827
2828static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
2829{
2830 if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
2831 return;
2832
6fa283b0 2833 DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
ddeea5b0
ID
2834 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
2835}
2836
ebc32824 2837static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
ddeea5b0
ID
2838{
2839 u32 mask;
2840 int err;
2841
2842 /*
2843 * Bspec defines the following GT well-on flags as debug only, so
2844 * don't treat them as hard failures.
2845 */
3dd14c04 2846 vlv_wait_for_gt_wells(dev_priv, false);
ddeea5b0
ID
2847
2848 mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
2849 WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
2850
2851 vlv_check_no_gt_access(dev_priv);
2852
2853 err = vlv_force_gfx_clock(dev_priv, true);
2854 if (err)
2855 goto err1;
2856
2857 err = vlv_allow_gt_wake(dev_priv, false);
2858 if (err)
2859 goto err2;
98711167 2860
2d1fe073 2861 if (!IS_CHERRYVIEW(dev_priv))
98711167 2862 vlv_save_gunit_s0ix_state(dev_priv);
ddeea5b0
ID
2863
2864 err = vlv_force_gfx_clock(dev_priv, false);
2865 if (err)
2866 goto err2;
2867
2868 return 0;
2869
2870err2:
2871 /* For safety always re-enable waking and disable gfx clock forcing */
2872 vlv_allow_gt_wake(dev_priv, true);
2873err1:
2874 vlv_force_gfx_clock(dev_priv, false);
2875
2876 return err;
2877}
2878
016970be
SK
2879static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
2880 bool rpm_resume)
ddeea5b0 2881{
ddeea5b0
ID
2882 int err;
2883 int ret;
2884
2885 /*
2886 * If any of the steps fail just try to continue, that's the best we
2887 * can do at this point. Return the first error code (which will also
2888 * leave RPM permanently disabled).
2889 */
2890 ret = vlv_force_gfx_clock(dev_priv, true);
2891
2d1fe073 2892 if (!IS_CHERRYVIEW(dev_priv))
98711167 2893 vlv_restore_gunit_s0ix_state(dev_priv);
ddeea5b0
ID
2894
2895 err = vlv_allow_gt_wake(dev_priv, true);
2896 if (!ret)
2897 ret = err;
2898
2899 err = vlv_force_gfx_clock(dev_priv, false);
2900 if (!ret)
2901 ret = err;
2902
2903 vlv_check_no_gt_access(dev_priv);
2904
7c108fd8 2905 if (rpm_resume)
46f16e63 2906 intel_init_clock_gating(dev_priv);
ddeea5b0
ID
2907
2908 return ret;
2909}
2910
c49d13ee 2911static int intel_runtime_suspend(struct device *kdev)
8a187455 2912{
361f9dc2 2913 struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
1bf676cc 2914 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
0ab9cfeb 2915 int ret;
8a187455 2916
fb6db0f5 2917 if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
c6df39b5
ID
2918 return -ENODEV;
2919
6772ffe0 2920 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
604effb7
ID
2921 return -ENODEV;
2922
8a187455
PZ
2923 DRM_DEBUG_KMS("Suspending device\n");
2924
9102650f 2925 disable_rpm_wakeref_asserts(rpm);
1f814dac 2926
d6102977
ID
2927 /*
2928 * We are safe here against re-faults, since the fault handler takes
2929 * an RPM reference.
2930 */
7c108fd8 2931 i915_gem_runtime_suspend(dev_priv);
d6102977 2932
9dfe3459 2933 intel_gt_runtime_suspend(&dev_priv->gt);
a1c41994 2934
2eb5252e 2935 intel_runtime_pm_disable_interrupts(dev_priv);
b5478bcd 2936
f7de5027 2937 intel_uncore_suspend(&dev_priv->uncore);
01c799c9 2938
507e126e 2939 ret = 0;
3e68928b
AM
2940 if (INTEL_GEN(dev_priv) >= 11) {
2941 icl_display_core_uninit(dev_priv);
2942 bxt_enable_dc9(dev_priv);
2943 } else if (IS_GEN9_LP(dev_priv)) {
507e126e
ID
2944 bxt_display_core_uninit(dev_priv);
2945 bxt_enable_dc9(dev_priv);
2946 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2947 hsw_enable_pc8(dev_priv);
2948 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2949 ret = vlv_suspend_complete(dev_priv);
2950 }
2951
0ab9cfeb
ID
2952 if (ret) {
2953 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
f7de5027 2954 intel_uncore_runtime_resume(&dev_priv->uncore);
01c799c9 2955
b963291c 2956 intel_runtime_pm_enable_interrupts(dev_priv);
0ab9cfeb 2957
9dfe3459 2958 intel_gt_runtime_resume(&dev_priv->gt);
1ed21cb4 2959
1ed21cb4
SAK
2960 i915_gem_restore_fences(dev_priv);
2961
9102650f 2962 enable_rpm_wakeref_asserts(rpm);
1f814dac 2963
0ab9cfeb
ID
2964 return ret;
2965 }
a8a8bd54 2966
9102650f 2967 enable_rpm_wakeref_asserts(rpm);
3b58a945 2968 intel_runtime_pm_driver_release(rpm);
55ec45c2 2969
2cf7bf6f 2970 if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
55ec45c2
MK
2971 DRM_ERROR("Unclaimed access detected prior to suspending\n");
2972
9102650f 2973 rpm->suspended = true;
1fb2362b
KCA
2974
2975 /*
c8a0bd42
PZ
2976 * FIXME: We really should find a document that references the arguments
2977 * used below!
1fb2362b 2978 */
6f9f4b7a 2979 if (IS_BROADWELL(dev_priv)) {
d37ae19a
PZ
2980 /*
2981 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
2982 * being detected, and the call we do at intel_runtime_resume()
2983 * won't be able to restore them. Since PCI_D3hot matches the
2984 * actual specification and appears to be working, use it.
2985 */
6f9f4b7a 2986 intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
d37ae19a 2987 } else {
c8a0bd42
PZ
2988 /*
2989 * Current versions of firmware which depend on this opregion
2990 * notification have repurposed the D1 definition to mean
2991 * "runtime suspended" vs. what you would normally expect (D3)
2992 * to distinguish it from notifications that might be sent via
2993 * the suspend path.
2994 */
6f9f4b7a 2995 intel_opregion_notify_adapter(dev_priv, PCI_D1);
c8a0bd42 2996 }
8a187455 2997
f568eeee 2998 assert_forcewakes_inactive(&dev_priv->uncore);
dc9fb09c 2999
21d6e0bd 3000 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
19625e85
L
3001 intel_hpd_poll_init(dev_priv);
3002
a8a8bd54 3003 DRM_DEBUG_KMS("Device suspended\n");
8a187455
PZ
3004 return 0;
3005}
3006
c49d13ee 3007static int intel_runtime_resume(struct device *kdev)
8a187455 3008{
361f9dc2 3009 struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
1bf676cc 3010 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1a5df187 3011 int ret = 0;
8a187455 3012
6772ffe0 3013 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
604effb7 3014 return -ENODEV;
8a187455
PZ
3015
3016 DRM_DEBUG_KMS("Resuming device\n");
3017
9102650f
DCS
3018 WARN_ON_ONCE(atomic_read(&rpm->wakeref_count));
3019 disable_rpm_wakeref_asserts(rpm);
1f814dac 3020
6f9f4b7a 3021 intel_opregion_notify_adapter(dev_priv, PCI_D0);
9102650f 3022 rpm->suspended = false;
2cf7bf6f 3023 if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
55ec45c2 3024 DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
8a187455 3025
3e68928b
AM
3026 if (INTEL_GEN(dev_priv) >= 11) {
3027 bxt_disable_dc9(dev_priv);
3028 icl_display_core_init(dev_priv, true);
3029 if (dev_priv->csr.dmc_payload) {
3030 if (dev_priv->csr.allowed_dc_mask &
3031 DC_STATE_EN_UPTO_DC6)
3032 skl_enable_dc6(dev_priv);
3033 else if (dev_priv->csr.allowed_dc_mask &
3034 DC_STATE_EN_UPTO_DC5)
3035 gen9_enable_dc5(dev_priv);
3036 }
3037 } else if (IS_GEN9_LP(dev_priv)) {
507e126e
ID
3038 bxt_disable_dc9(dev_priv);
3039 bxt_display_core_init(dev_priv, true);
f62c79b3
ID
3040 if (dev_priv->csr.dmc_payload &&
3041 (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
3042 gen9_enable_dc5(dev_priv);
507e126e 3043 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
1a5df187 3044 hsw_disable_pc8(dev_priv);
507e126e 3045 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1a5df187 3046 ret = vlv_resume_prepare(dev_priv, true);
507e126e 3047 }
1a5df187 3048
f7de5027 3049 intel_uncore_runtime_resume(&dev_priv->uncore);
bedf4d79 3050
1ed21cb4
SAK
3051 intel_runtime_pm_enable_interrupts(dev_priv);
3052
0ab9cfeb
ID
3053 /*
3054 * No point of rolling back things in case of an error, as the best
3055 * we can do is to hope that things will still work (and disable RPM).
3056 */
9dfe3459 3057 intel_gt_runtime_resume(&dev_priv->gt);
83bf6d55 3058 i915_gem_restore_fences(dev_priv);
92b806d3 3059
08d8a232
VS
3060 /*
3061 * On VLV/CHV display interrupts are part of the display
3062 * power well, so hpd is reinitialized from there. For
3063 * everyone else do it here.
3064 */
666a4537 3065 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
08d8a232
VS
3066 intel_hpd_init(dev_priv);
3067
2503a0fe
KM
3068 intel_enable_ipc(dev_priv);
3069
9102650f 3070 enable_rpm_wakeref_asserts(rpm);
1f814dac 3071
0ab9cfeb
ID
3072 if (ret)
3073 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
3074 else
3075 DRM_DEBUG_KMS("Device resumed\n");
3076
3077 return ret;
8a187455
PZ
3078}
3079
42f5551d 3080const struct dev_pm_ops i915_pm_ops = {
5545dbbf
ID
3081 /*
3082 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
3083 * PMSG_RESUME]
3084 */
73b66f87 3085 .prepare = i915_pm_prepare,
0206e353 3086 .suspend = i915_pm_suspend,
76c4b250
ID
3087 .suspend_late = i915_pm_suspend_late,
3088 .resume_early = i915_pm_resume_early,
0206e353 3089 .resume = i915_pm_resume,
5545dbbf
ID
3090
3091 /*
3092 * S4 event handlers
3093 * @freeze, @freeze_late : called (1) before creating the
3094 * hibernation image [PMSG_FREEZE] and
3095 * (2) after rebooting, before restoring
3096 * the image [PMSG_QUIESCE]
3097 * @thaw, @thaw_early : called (1) after creating the hibernation
3098 * image, before writing it [PMSG_THAW]
3099 * and (2) after failing to create or
3100 * restore the image [PMSG_RECOVER]
3101 * @poweroff, @poweroff_late: called after writing the hibernation
3102 * image, before rebooting [PMSG_HIBERNATE]
3103 * @restore, @restore_early : called after rebooting and restoring the
3104 * hibernation image [PMSG_RESTORE]
3105 */
1f19ac2a
CW
3106 .freeze = i915_pm_freeze,
3107 .freeze_late = i915_pm_freeze_late,
3108 .thaw_early = i915_pm_thaw_early,
3109 .thaw = i915_pm_thaw,
36d61e67 3110 .poweroff = i915_pm_suspend,
ab3be73f 3111 .poweroff_late = i915_pm_poweroff_late,
1f19ac2a
CW
3112 .restore_early = i915_pm_restore_early,
3113 .restore = i915_pm_restore,
5545dbbf
ID
3114
3115 /* S0ix (via runtime suspend) event handlers */
97bea207
PZ
3116 .runtime_suspend = intel_runtime_suspend,
3117 .runtime_resume = intel_runtime_resume,
cbda12d7
ZW
3118};
3119
78b68556 3120static const struct vm_operations_struct i915_gem_vm_ops = {
de151cf6 3121 .fault = i915_gem_fault,
ab00b3e5
JB
3122 .open = drm_gem_vm_open,
3123 .close = drm_gem_vm_close,
de151cf6
JB
3124};
3125
e08e96de
AV
3126static const struct file_operations i915_driver_fops = {
3127 .owner = THIS_MODULE,
3128 .open = drm_open,
3129 .release = drm_release,
3130 .unlocked_ioctl = drm_ioctl,
3131 .mmap = drm_gem_mmap,
3132 .poll = drm_poll,
e08e96de 3133 .read = drm_read,
e08e96de 3134 .compat_ioctl = i915_compat_ioctl,
e08e96de
AV
3135 .llseek = noop_llseek,
3136};
3137
0673ad47
CW
3138static int
3139i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
3140 struct drm_file *file)
3141{
3142 return -ENODEV;
3143}
3144
3145static const struct drm_ioctl_desc i915_ioctls[] = {
3146 DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3147 DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
3148 DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
3149 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
3150 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
3151 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
b972fffa 3152 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
0673ad47
CW
3153 DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3154 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
3155 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
3156 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3157 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
3158 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3159 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3160 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
3161 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
3162 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3163 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
6a20fe7b 3164 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
b972fffa 3165 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
0673ad47
CW
3166 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
3167 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
b972fffa 3168 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
0673ad47
CW
3169 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
3170 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
b972fffa 3171 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
0673ad47
CW
3172 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3173 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3174 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
3175 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
3176 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
3177 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
3178 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
3179 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
3180 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
111dbcab
CW
3181 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
3182 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
0673ad47 3183 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
6a20fe7b 3184 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
0673ad47 3185 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
0cd54b03
DV
3186 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
3187 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
3188 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
3189 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
b972fffa 3190 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
b9171541 3191 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
0673ad47
CW
3192 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
3193 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
3194 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
3195 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
3196 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
3197 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
eec688e1 3198 DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
f89823c2
LL
3199 DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
3200 DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
a446ae2c 3201 DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
7f3f317a
CW
3202 DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
3203 DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
0673ad47
CW
3204};
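/*
 * A minimal userspace sketch (not part of this file) showing how one
 * entry of the table above is reached through the generic DRM ioctl
 * path: I915_GETPARAM. Assumes an i915 device at /dev/dri/card0 and
 * the uapi header installed as <drm/i915_drm.h>.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>

int main(void)
{
	int chipset_id = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_CHIPSET_ID,
		.value = &chipset_id,
	};
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;

	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("chipset id: 0x%04x\n", chipset_id);

	close(fd);
	return 0;
}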
3205
1da177e4 3206static struct drm_driver driver = {
0c54781b
MW
3207 /* Don't use MTRRs here; the Xserver or userspace app should
3208 * deal with them for Intel hardware.
792d2b9a 3209 */
673a394b 3210 .driver_features =
1ff49481 3211 DRIVER_GEM | DRIVER_PRIME |
cf6e7bac 3212 DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
cad3688f 3213 .release = i915_driver_release,
673a394b 3214 .open = i915_driver_open,
22eae947 3215 .lastclose = i915_driver_lastclose,
673a394b 3216 .postclose = i915_driver_postclose,
d8e29209 3217
b1f788c6 3218 .gem_close_object = i915_gem_close_object,
f0cd5182 3219 .gem_free_object_unlocked = i915_gem_free_object,
de151cf6 3220 .gem_vm_ops = &i915_gem_vm_ops,
1286ff73
DV
3221
3222 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
3223 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
3224 .gem_prime_export = i915_gem_prime_export,
3225 .gem_prime_import = i915_gem_prime_import,
3226
7d23e593
VS
3227 .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
3228 .get_scanout_position = i915_get_crtc_scanoutpos,
3229
ff72145b 3230 .dumb_create = i915_gem_dumb_create,
da6b51d0 3231 .dumb_map_offset = i915_gem_mmap_gtt,
1da177e4 3232 .ioctls = i915_ioctls,
0673ad47 3233 .num_ioctls = ARRAY_SIZE(i915_ioctls),
e08e96de 3234 .fops = &i915_driver_fops,
22eae947
DA
3235 .name = DRIVER_NAME,
3236 .desc = DRIVER_DESC,
3237 .date = DRIVER_DATE,
3238 .major = DRIVER_MAJOR,
3239 .minor = DRIVER_MINOR,
3240 .patchlevel = DRIVER_PATCHLEVEL,
1da177e4 3241};
66d9cb5d
CW
3242
3243#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3244#include "selftests/mock_drm.c"
3245#endif