/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"

static struct drm_driver driver;

static unsigned int i915_load_fail_count;

bool __i915_inject_load_failure(const char *func, int line)
{
	if (i915_load_fail_count >= i915_modparams.inject_load_failure)
		return false;

	if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
			 i915_modparams.inject_load_failure, func, line);
		return true;
	}

	return false;
}

#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
		    "providing the dmesg log by booting with drm.debug=0xf"

void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...)
{
	static bool shown_bug_once;
	struct device *kdev = dev_priv->drm.dev;
	bool is_error = level[1] <= KERN_ERR[1];
	bool is_debug = level[1] == KERN_DEBUG[1];
	struct va_format vaf;
	va_list args;

	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
		   __builtin_return_address(0), &vaf);

	if (is_error && !shown_bug_once) {
		dev_notice(kdev, "%s", FDO_BUG_MSG);
		shown_bug_once = true;
	}

	va_end(args);
}

static bool i915_error_injected(struct drm_i915_private *dev_priv)
{
	return i915_modparams.inject_load_failure &&
	       i915_load_fail_count == i915_modparams.inject_load_failure;
}

#define i915_load_error(dev_priv, fmt, ...)				     \
	__i915_printk(dev_priv,						     \
		      i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
		      fmt, ##__VA_ARGS__)

static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN5(dev_priv)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		ret = PCH_LPT;
		if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
			dev_priv->pch_id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
		else
			dev_priv->pch_id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	} else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
		ret = PCH_CNP;
		DRM_DEBUG_KMS("Assuming CannonPoint PCH\n");
	}

	return ret;
}

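/*
 * intel_detect_pch - identify the platform controller hub (PCH)
 *
 * Scan the PCI ISA bridge devices to work out which PCH variant is present
 * and record its type and device id in dev_priv. When only an emulated south
 * bridge is visible (virtualized setups), fall back to the educated guess in
 * intel_virt_detect_pch().
 */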
static void intel_detect_pch(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;

			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev_priv));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!IS_GEN6(dev_priv) &&
					!IS_IVYBRIDGE(dev_priv));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!IS_GEN6(dev_priv) &&
					!IS_IVYBRIDGE(dev_priv));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(IS_HSW_ULT(dev_priv) ||
					IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(!IS_HSW_ULT(dev_priv) &&
					!IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_WPT_DEVICE_ID_TYPE) {
				/* WildcatPoint is LPT compatible */
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(IS_HSW_ULT(dev_priv) ||
					IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) {
				/* WildcatPoint is LPT compatible */
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(!IS_HSW_ULT(dev_priv) &&
					!IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev_priv) &&
					!IS_KABYLAKE(dev_priv));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev_priv) &&
					!IS_KABYLAKE(dev_priv));
			} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_KBP;
				DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
				WARN_ON(!IS_SKYLAKE(dev_priv) &&
					!IS_KABYLAKE(dev_priv) &&
					!IS_COFFEELAKE(dev_priv));
			} else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CNP;
				DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
				WARN_ON(!IS_CANNONLAKE(dev_priv) &&
					!IS_COFFEELAKE(dev_priv));
			} else if (id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CNP;
				DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
				WARN_ON(!IS_CANNONLAKE(dev_priv) &&
					!IS_COFFEELAKE(dev_priv));
			} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
				   id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
				   (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
				    pch->subsystem_vendor ==
					    PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
				    pch->subsystem_device ==
					    PCI_SUBDEVICE_ID_QEMU)) {
				dev_priv->pch_type =
					intel_virt_detect_pch(dev_priv);
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}

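/*
 * i915_getparam - DRM_IOCTL_I915_GETPARAM handler
 *
 * Report a single driver/hardware capability to userspace. Legacy UMS/DRI
 * parameters are rejected with -ENODEV and unknown parameters with -EINVAL.
 */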
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = pdev->revision;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_BSD:
		value = !!dev_priv->engine[VCS];
		break;
	case I915_PARAM_HAS_BLT:
		value = !!dev_priv->engine[BCS];
		break;
	case I915_PARAM_HAS_VEBOX:
		value = !!dev_priv->engine[VECS];
		break;
	case I915_PARAM_HAS_BSD2:
		value = !!dev_priv->engine[VCS2];
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev_priv);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev_priv);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev_priv);
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_modparams.semaphores;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version(dev_priv);
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev_priv)->sseu.eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915_modparams.enable_hangcheck &&
			intel_has_gpu_reset(dev_priv);
		if (value && intel_has_reset_engine(dev_priv))
			value = 2;
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev_priv);
		break;
	case I915_PARAM_HAS_POOLED_EU:
		value = HAS_POOLED_EU(dev_priv);
		break;
	case I915_PARAM_MIN_EU_IN_POOL:
		value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
		break;
	case I915_PARAM_HUC_STATUS:
		intel_runtime_pm_get(dev_priv);
		value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
		intel_runtime_pm_put(dev_priv);
		break;
	case I915_PARAM_MMAP_GTT_VERSION:
		/* Though we've started our numbering from 1, and so class all
		 * earlier versions as 0, in effect their value is undefined as
		 * the ioctl will report EINVAL for the unknown param!
		 */
		value = i915_gem_mmap_gtt_version();
		break;
	case I915_PARAM_HAS_SCHEDULER:
		value = 0;
		if (dev_priv->engine[RCS] && dev_priv->engine[RCS]->schedule) {
			value |= I915_SCHEDULER_CAP_ENABLED;
			value |= I915_SCHEDULER_CAP_PRIORITY;

			if (INTEL_INFO(dev_priv)->has_logical_ring_preemption &&
			    i915_modparams.enable_execlists &&
			    !i915_modparams.enable_guc_submission)
				value |= I915_SCHEDULER_CAP_PREEMPTION;
		}
		break;

	case I915_PARAM_MMAP_VERSION:
		/* Remember to bump this if the version changes! */
	case I915_PARAM_HAS_GEM:
	case I915_PARAM_HAS_PAGEFLIPPING:
	case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
	case I915_PARAM_HAS_RELAXED_FENCING:
	case I915_PARAM_HAS_COHERENT_RINGS:
	case I915_PARAM_HAS_RELAXED_DELTA:
	case I915_PARAM_HAS_GEN7_SOL_RESET:
	case I915_PARAM_HAS_WAIT_TIMEOUT:
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
	case I915_PARAM_HAS_PINNED_BATCHES:
	case I915_PARAM_HAS_EXEC_NO_RELOC:
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
	case I915_PARAM_HAS_EXEC_SOFTPIN:
	case I915_PARAM_HAS_EXEC_ASYNC:
	case I915_PARAM_HAS_EXEC_FENCE:
	case I915_PARAM_HAS_EXEC_CAPTURE:
	case I915_PARAM_HAS_EXEC_BATCH_FIRST:
	case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
		/* For the time being all of these are always true;
		 * if some supported hardware does not have one of these
		 * features this value needs to be provided from
		 * INTEL_INFO(), a feature macro, or similar.
		 */
		value = 1;
		break;
	case I915_PARAM_SLICE_MASK:
		value = INTEL_INFO(dev_priv)->sseu.slice_mask;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_SUBSLICE_MASK:
		value = INTEL_INFO(dev_priv)->sseu.subslice_mask;
		if (!value)
			return -ENODEV;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (put_user(value, param->value))
		return -EFAULT;

	return 0;
}

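/* Look up the host bridge (device 0, function 0 on bus 0), needed for MCHBAR setup */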
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_GEN(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible; remember whether we need to disable it again on teardown */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_i915_private *dev_priv = cookie;

	intel_modeset_vga_set_state(dev_priv, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

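/* vga_switcheroo client support: suspend/resume the device when the active GPU is switched */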
static int i915_resume_switcheroo(struct drm_device *dev);
static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

static void i915_gem_fini(struct drm_i915_private *dev_priv)
{
	/* Flush any outstanding unpin_work. */
	i915_gem_drain_workqueue(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uc_fini_hw(dev_priv);
	i915_gem_cleanup_engines(dev_priv);
	i915_gem_contexts_fini(dev_priv);
	i915_gem_cleanup_userptr(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	i915_gem_drain_freed_objects(dev_priv);

	WARN_ON(!list_empty(&dev_priv->contexts.list));
}

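/*
 * Bring up KMS: VGA arbitration and switcheroo registration, power domains,
 * CSR/DMC firmware, interrupts, GMBUS, output discovery, GEM and, when pipes
 * are present, the fbdev emulation.
 */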
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_bios_init(dev_priv);

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(dev_priv);

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev_priv);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	ret = intel_modeset_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_uc_init_fw(dev_priv);

	ret = i915_gem_init(dev_priv);
	if (ret)
		goto cleanup_uc;

	intel_modeset_gem_init(dev);

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	if (i915_gem_suspend(dev_priv))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
	i915_gem_fini(dev_priv);
cleanup_uc:
	intel_uc_fini_fw(dev_priv);
cleanup_irq:
	drm_irq_uninstall(dev);
	intel_teardown_gmbus(dev_priv);
cleanup_csr:
	intel_csr_ucode_fini(dev_priv);
	intel_power_domains_fini(dev_priv);
	vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
	vga_client_register(pdev, NULL, NULL, NULL);
out:
	return ret;
}

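/* Remove firmware framebuffers (e.g. efifb/vesafb) that overlap our GGTT aperture */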
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = ggtt->mappable_base;
	ap->ranges[0].size = ggtt->mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}

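/* Free the per-engine structures allocated during engine setup */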
static void i915_engines_cleanup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		kfree(engine);
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);

	if (pre) {
		DRM_ERROR("This is a pre-production stepping. "
			  "It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

/**
 * i915_driver_init_early - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_init_early(struct drm_i915_private *dev_priv,
				  const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	int ret = 0;

	if (i915_inject_load_failure())
		return -ENODEV;

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(dev_priv);
	memcpy(device_info, match_info, sizeof(*device_info));
	device_info->device_id = dev_priv->drm.pdev->device;

	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
		     sizeof(device_info->platform_mask) * BITS_PER_BYTE);
	device_info->platform_mask = BIT(device_info->platform);

	BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
	device_info->gen_mask = BIT(device_info->gen - 1);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);

	spin_lock_init(&dev_priv->mm.object_stat_lock);
	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->modeset_restore_lock);
	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);

	intel_uc_init_early(dev_priv);
	i915_memcpy_init_early(dev_priv);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		goto err_engines;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_pm_setup(dev_priv);
	intel_init_dpio(dev_priv);
	intel_power_domains_init(dev_priv);
	intel_irq_init(dev_priv);
	intel_hangcheck_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	ret = i915_gem_load_init(dev_priv);
	if (ret < 0)
		goto err_irq;

	intel_display_crc_init(dev_priv);

	intel_device_info_dump(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	i915_perf_init(dev_priv);

	return 0;

err_irq:
	intel_irq_fini(dev_priv);
	i915_workqueues_cleanup(dev_priv);
err_engines:
	i915_engines_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_perf_fini(dev_priv);
	i915_gem_load_cleanup(dev_priv);
	intel_irq_fini(dev_priv);
	i915_workqueues_cleanup(dev_priv);
	i915_engines_cleanup(dev_priv);
}

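/* Map the register BAR for MMIO access and make sure MCHBAR is usable */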
static int i915_mmio_setup(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (dev_priv->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);

	return 0;
}

static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	intel_teardown_mchbar(dev_priv);
	pci_iounmap(pdev, dev_priv->regs);
}

/**
 * i915_driver_init_mmio - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = i915_mmio_setup(dev_priv);
	if (ret < 0)
		goto err_bridge;

	intel_uncore_init(dev_priv);

	intel_uc_init_mmio(dev_priv);

	ret = intel_engines_init_mmio(dev_priv);
	if (ret)
		goto err_uncore;

	i915_gem_init_mmio(dev_priv);

	return 0;

err_uncore:
	intel_uncore_fini(dev_priv);
err_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
	intel_uncore_fini(dev_priv);
	i915_mmio_cleanup(dev_priv);
	pci_dev_put(dev_priv->bridge_dev);
}

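/*
 * Clamp the user-supplied module parameters (execlists, ppgtt, semaphores,
 * GuC, GVT) to what the hardware and driver actually support.
 */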
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	i915_modparams.enable_execlists =
		intel_sanitize_enable_execlists(dev_priv,
						i915_modparams.enable_execlists);

	/*
	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
	 * user's requested state against the hardware/driver capabilities. We
	 * do this now so that we can print out any log messages once rather
	 * than every time we check intel_enable_ppgtt().
	 */
	i915_modparams.enable_ppgtt =
		intel_sanitize_enable_ppgtt(dev_priv,
					    i915_modparams.enable_ppgtt);
	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);

	i915_modparams.semaphores =
		intel_sanitize_semaphores(dev_priv, i915_modparams.semaphores);
	DRM_DEBUG_DRIVER("use GPU semaphores? %s\n",
			 yesno(i915_modparams.semaphores));

	intel_uc_sanitize_options(dev_priv);

	intel_gvt_sanitize_options(dev_priv);
}

/**
 * i915_driver_init_hw - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_device_info_runtime_init(dev_priv);

	intel_sanitize_options(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		return ret;

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto out_ggtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_ggtt;
	}

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		return ret;

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		DRM_ERROR("failed to enable GGTT\n");
		goto out_ggtt;
	}

	pci_set_master(pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_uncore_sanitize(dev_priv);

	intel_opregion_setup(dev_priv);

	i915_gem_load_init_fences(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, and was defeatured. MSI interrupts seem to
	 * get lost on g4x as well, and interrupt delivery seems to stay
	 * properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
	 */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto out_ggtt;

	return 0;

out_ggtt:
	i915_ggtt_cleanup_hw(dev_priv);

	return ret;
}

/**
 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);

	pm_qos_remove_request(&dev_priv->pm_qos);
	i915_ggtt_cleanup_hw(dev_priv);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_shrinker_init(dev_priv);

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev_priv))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		i915_guc_log_register(dev_priv);
		i915_setup_sysfs(dev_priv);

		/* Depends on sysfs having been initialized */
		i915_perf_register(dev_priv);
	} else
		DRM_ERROR("Failed to register driver for userspace access!\n");

	if (INTEL_INFO(dev_priv)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	if (IS_GEN5(dev_priv))
		intel_gpu_ips_init(dev_priv);

	intel_audio_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45. Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	intel_fbdev_unregister(dev_priv);
	intel_audio_deinit(dev_priv);

	intel_gpu_ips_teardown();
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	i915_perf_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	i915_guc_log_unregister(dev_priv);
	drm_dev_unregister(&dev_priv->drm);

	i915_gem_shrinker_cleanup(dev_priv);
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver load routine has to do several things:
 * - drive output discovery via intel_modeset_init()
 * - initialize the memory manager
 * - allocate initial config memory
 * - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct drm_i915_private *dev_priv;
	int ret;

	/* Enable nuclear pageflip on ILK+ */
	if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
		driver.driver_features &= ~DRIVER_ATOMIC;

	ret = -ENOMEM;
	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv)
		ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "allocation failed\n");
		goto out_free;
	}

	dev_priv->drm.pdev = pdev;
	dev_priv->drm.dev_private = dev_priv;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;

	pci_set_drvdata(pdev, &dev_priv->drm);
	/*
	 * Disable the system suspend direct complete optimization, which can
	 * leave the device suspended skipping the driver's suspend handlers
	 * if the device was already runtime suspended. This is needed due to
	 * the difference in our runtime and system suspend sequence and
	 * because the HDA driver may require us to enable the audio power
	 * domain during system suspend.
	 */
	pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;

	ret = i915_driver_init_early(dev_priv, ent);
	if (ret < 0)
		goto out_pci_disable;

	intel_runtime_pm_get(dev_priv);

	ret = i915_driver_init_mmio(dev_priv);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_init_hw(dev_priv);
	if (ret < 0)
		goto out_cleanup_mmio;

	/*
	 * TODO: move the vblank init and parts of modeset init steps into one
	 * of the i915_driver_init_/i915_driver_register functions according
	 * to the role/effect of the given init step.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes) {
		ret = drm_vblank_init(&dev_priv->drm,
				      INTEL_INFO(dev_priv)->num_pipes);
		if (ret)
			goto out_cleanup_hw;
	}

	ret = i915_load_modeset_init(&dev_priv->drm);
	if (ret < 0)
		goto out_cleanup_hw;

	i915_driver_register(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	intel_init_ipc(dev_priv);

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		DRM_INFO("DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");

	intel_runtime_pm_put(dev_priv);

	return 0;

out_cleanup_hw:
	i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
	i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
	intel_runtime_pm_put(dev_priv);
	i915_driver_cleanup_early(dev_priv);
out_pci_disable:
	pci_disable_device(pdev);
out_fini:
	i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
	drm_dev_fini(&dev_priv->drm);
out_free:
	kfree(dev_priv);
	return ret;
}

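/* Tear down the driver, unwinding the setup done in i915_driver_load() */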
void i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	i915_driver_unregister(dev_priv);

	if (i915_gem_suspend(dev_priv))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");

	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	drm_atomic_helper_shutdown(dev);

	intel_gvt_cleanup(dev_priv);

	intel_modeset_cleanup(dev);

	/*
	 * free the memory space allocated for the child device
	 * config parsed from VBT
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}
	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

	vga_switcheroo_unregister_client(pdev);
	vga_client_register(pdev, NULL, NULL, NULL);

	intel_csr_ucode_fini(dev_priv);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_reset_error_state(dev_priv);

	i915_gem_fini(dev_priv);
	intel_uc_fini_fw(dev_priv);
	intel_fbc_cleanup_cfb(dev_priv);

	intel_power_domains_fini(dev_priv);

	i915_driver_cleanup_hw(dev_priv);
	i915_driver_cleanup_mmio(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
}

static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i915_driver_cleanup_early(dev_priv);
	drm_dev_fini(&dev_priv->drm);

	kfree(dev_priv);
}

0673ad47 | 1454 | static int i915_driver_open(struct drm_device *dev, struct drm_file *file) |
2911a35b | 1455 | { |
829a0af2 | 1456 | struct drm_i915_private *i915 = to_i915(dev); |
0673ad47 | 1457 | int ret; |
2911a35b | 1458 | |
829a0af2 | 1459 | ret = i915_gem_open(i915, file); |
0673ad47 CW |
1460 | if (ret) |
1461 | return ret; | |
2911a35b | 1462 | |
0673ad47 CW |
1463 | return 0; |
1464 | } | |
71386ef9 | 1465 | |
0673ad47 CW |
1466 | /** |
1467 | * i915_driver_lastclose - clean up after all DRM clients have exited | |
1468 | * @dev: DRM device | |
1469 | * | |
1470 | * Take care of cleaning up after all DRM clients have exited. In the | |
1471 | * mode setting case, we want to restore the kernel's initial mode (just | |
1472 | * in case the last client left us in a bad state). | |
1473 | * | |
1474 | * Additionally, in the non-mode setting case, we'll tear down the GTT | |
1475 | * and DMA structures, since the kernel won't be using them, and clean | |
1476 | * up any GEM state. | |
1477 | */ | |
1478 | static void i915_driver_lastclose(struct drm_device *dev) | |
1479 | { | |
1480 | intel_fbdev_restore_mode(dev); | |
1481 | vga_switcheroo_process_delayed_switch(); | |
1482 | } | |
2911a35b | 1483 | |
7d2ec881 | 1484 | static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) |
0673ad47 | 1485 | { |
7d2ec881 DV |
1486 | struct drm_i915_file_private *file_priv = file->driver_priv; |
1487 | ||
0673ad47 | 1488 | mutex_lock(&dev->struct_mutex); |
829a0af2 | 1489 | i915_gem_context_close(file); |
0673ad47 CW |
1490 | i915_gem_release(dev, file); |
1491 | mutex_unlock(&dev->struct_mutex); | |
0673ad47 CW |
1492 | |
1493 | kfree(file_priv); | |
2911a35b BW |
1494 | } |
1495 | ||
07f9cd0b ID |
1496 | static void intel_suspend_encoders(struct drm_i915_private *dev_priv) |
1497 | { | |
91c8a326 | 1498 | struct drm_device *dev = &dev_priv->drm; |
19c8054c | 1499 | struct intel_encoder *encoder; |
07f9cd0b ID |
1500 | |
1501 | drm_modeset_lock_all(dev); | |
19c8054c JN |
1502 | for_each_intel_encoder(dev, encoder) |
1503 | if (encoder->suspend) | |
1504 | encoder->suspend(encoder); | |
07f9cd0b ID |
1505 | drm_modeset_unlock_all(dev); |
1506 | } | |
1507 | ||
1a5df187 PZ |
1508 | static int vlv_resume_prepare(struct drm_i915_private *dev_priv, |
1509 | bool rpm_resume); | |
507e126e | 1510 | static int vlv_suspend_complete(struct drm_i915_private *dev_priv); |
f75a1985 | 1511 | |
bc87229f ID |
1512 | static bool suspend_to_idle(struct drm_i915_private *dev_priv) |
1513 | { | |
1514 | #if IS_ENABLED(CONFIG_ACPI_SLEEP) | |
1515 | if (acpi_target_system_state() < ACPI_STATE_S3) | |
1516 | return true; | |
1517 | #endif | |
1518 | return false; | |
1519 | } | |
ebc32824 | 1520 | |
5e365c39 | 1521 | static int i915_drm_suspend(struct drm_device *dev) |
ba8bbcf6 | 1522 | { |
fac5e23e | 1523 | struct drm_i915_private *dev_priv = to_i915(dev); |
52a05c30 | 1524 | struct pci_dev *pdev = dev_priv->drm.pdev; |
e5747e3a | 1525 | pci_power_t opregion_target_state; |
d5818938 | 1526 | int error; |
61caf87c | 1527 | |
b8efb17b ZR |
1528 | /* ignore lid events during suspend */ |
1529 | mutex_lock(&dev_priv->modeset_restore_lock); | |
1530 | dev_priv->modeset_restore = MODESET_SUSPENDED; | |
1531 | mutex_unlock(&dev_priv->modeset_restore_lock); | |
1532 | ||
1f814dac ID |
1533 | disable_rpm_wakeref_asserts(dev_priv); |
1534 | ||
c67a470b PZ |
1535 | /* We do a lot of poking in a lot of registers, so make sure they work |
1536 | * properly. */ | |
da7e29bd | 1537 | intel_display_set_init_power(dev_priv, true); |
cb10799c | 1538 | |
5bcf719b DA |
1539 | drm_kms_helper_poll_disable(dev); |
1540 | ||
52a05c30 | 1541 | pci_save_state(pdev); |
ba8bbcf6 | 1542 | |
bf9e8429 | 1543 | error = i915_gem_suspend(dev_priv); |
d5818938 | 1544 | if (error) { |
52a05c30 | 1545 | dev_err(&pdev->dev, |
d5818938 | 1546 | "GEM idle failed, resume might fail\n"); |
1f814dac | 1547 | goto out; |
d5818938 | 1548 | } |
db1b76ca | 1549 | |
6b72d486 | 1550 | intel_display_suspend(dev); |
2eb5252e | 1551 | |
d5818938 | 1552 | intel_dp_mst_suspend(dev); |
7d708ee4 | 1553 | |
d5818938 DV |
1554 | intel_runtime_pm_disable_interrupts(dev_priv); |
1555 | intel_hpd_cancel_work(dev_priv); | |
09b64267 | 1556 | |
d5818938 | 1557 | intel_suspend_encoders(dev_priv); |
0e32b39c | 1558 | |
712bf364 | 1559 | intel_suspend_hw(dev_priv); |
5669fcac | 1560 | |
275a991c | 1561 | i915_gem_suspend_gtt_mappings(dev_priv); |
828c7908 | 1562 | |
af6dc742 | 1563 | i915_save_state(dev_priv); |
9e06dd39 | 1564 | |
bc87229f | 1565 | opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; |
6f9f4b7a | 1566 | intel_opregion_notify_adapter(dev_priv, opregion_target_state); |
e5747e3a | 1567 | |
68f60946 | 1568 | intel_uncore_suspend(dev_priv); |
03d92e47 | 1569 | intel_opregion_unregister(dev_priv); |
8ee1c3db | 1570 | |
82e3b8c1 | 1571 | intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); |
3fa016a0 | 1572 | |
62d5d69b MK |
1573 | dev_priv->suspend_count++; |
1574 | ||
f74ed08d | 1575 | intel_csr_ucode_suspend(dev_priv); |
f514c2d8 | 1576 | |
1f814dac ID |
1577 | out: |
1578 | enable_rpm_wakeref_asserts(dev_priv); | |
1579 | ||
1580 | return error; | |
84b79f8d RW |
1581 | } |
1582 | ||
c49d13ee | 1583 | static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) |
c3c09c95 | 1584 | { |
c49d13ee | 1585 | struct drm_i915_private *dev_priv = to_i915(dev); |
52a05c30 | 1586 | struct pci_dev *pdev = dev_priv->drm.pdev; |
bc87229f | 1587 | bool fw_csr; |
c3c09c95 ID |
1588 | int ret; |
1589 | ||
1f814dac ID |
1590 | disable_rpm_wakeref_asserts(dev_priv); |
1591 | ||
4c494a57 ID |
1592 | intel_display_set_init_power(dev_priv, false); |
1593 | ||
dd9f31c7 | 1594 | fw_csr = !IS_GEN9_LP(dev_priv) && !hibernation && |
a7c8125f | 1595 | suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; |
bc87229f ID |
1596 | /* |
1597 | * In case of firmware assisted context save/restore don't manually | |
1598 | * deinit the power domains. This also means the CSR/DMC firmware will | |
1599 | * stay active, it will power down any HW resources as required and | |
1600 | * also enable deeper system power states that would be blocked if the | |
1601 | * firmware was inactive. | |
1602 | */ | |
1603 | if (!fw_csr) | |
1604 | intel_power_domains_suspend(dev_priv); | |
73dfc227 | 1605 | |
507e126e | 1606 | ret = 0; |
b9fd799e | 1607 | if (IS_GEN9_LP(dev_priv)) |
507e126e | 1608 | bxt_enable_dc9(dev_priv); |
b8aea3d1 | 1609 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) |
507e126e ID |
1610 | hsw_enable_pc8(dev_priv); |
1611 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | |
1612 | ret = vlv_suspend_complete(dev_priv); | |
c3c09c95 ID |
1613 | |
1614 | if (ret) { | |
1615 | DRM_ERROR("Suspend complete failed: %d\n", ret); | |
bc87229f ID |
1616 | if (!fw_csr) |
1617 | intel_power_domains_init_hw(dev_priv, true); | |
c3c09c95 | 1618 | |
1f814dac | 1619 | goto out; |
c3c09c95 ID |
1620 | } |
1621 | ||
52a05c30 | 1622 | pci_disable_device(pdev); |
ab3be73f | 1623 | /* |
54875571 | 1624 | * During hibernation on some platforms the BIOS may try to access |
ab3be73f ID |
1625 | * the device even though it's already in D3 and hang the machine. So |
1626 | * leave the device in D0 on those platforms and hope the BIOS will | |
54875571 ID |
1627 | * power down the device properly. The issue was seen on multiple old |
1628 | * GENs with different BIOS vendors, so having an explicit blacklist | |
1629 | * is impractical; apply the workaround on everything pre GEN6. The |
1630 | * platforms where the issue was seen: | |
1631 | * Lenovo Thinkpad X301, X61s, X60, T60, X41 | |
1632 | * Fujitsu FSC S7110 | |
1633 | * Acer Aspire 1830T | |
ab3be73f | 1634 | */ |
514e1d64 | 1635 | if (!(hibernation && INTEL_GEN(dev_priv) < 6)) |
52a05c30 | 1636 | pci_set_power_state(pdev, PCI_D3hot); |
c3c09c95 | 1637 | |
bc87229f ID |
1638 | dev_priv->suspended_to_idle = suspend_to_idle(dev_priv); |
1639 | ||
1f814dac ID |
1640 | out: |
1641 | enable_rpm_wakeref_asserts(dev_priv); | |
1642 | ||
1643 | return ret; | |
c3c09c95 ID |
1644 | } |
1645 | ||
a9a251c2 | 1646 | static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) |
84b79f8d RW |
1647 | { |
1648 | int error; | |
1649 | ||
ded8b07d | 1650 | if (!dev) { |
84b79f8d RW |
1651 | DRM_ERROR("dev: %p\n", dev); |
1652 | DRM_ERROR("DRM not initialized, aborting suspend.\n"); | |
1653 | return -ENODEV; | |
1654 | } | |
1655 | ||
0b14cbd2 ID |
1656 | if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND && |
1657 | state.event != PM_EVENT_FREEZE)) | |
1658 | return -EINVAL; | |
5bcf719b DA |
1659 | |
1660 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | |
1661 | return 0; | |
6eecba33 | 1662 | |
5e365c39 | 1663 | error = i915_drm_suspend(dev); |
84b79f8d RW |
1664 | if (error) |
1665 | return error; | |
1666 | ||
ab3be73f | 1667 | return i915_drm_suspend_late(dev, false); |
ba8bbcf6 JB |
1668 | } |
1669 | ||
5e365c39 | 1670 | static int i915_drm_resume(struct drm_device *dev) |
76c4b250 | 1671 | { |
fac5e23e | 1672 | struct drm_i915_private *dev_priv = to_i915(dev); |
ac840ae5 | 1673 | int ret; |
9d49c0ef | 1674 | |
1f814dac | 1675 | disable_rpm_wakeref_asserts(dev_priv); |
abc80abd | 1676 | intel_sanitize_gt_powersave(dev_priv); |
1f814dac | 1677 | |
97d6d7ab | 1678 | ret = i915_ggtt_enable_hw(dev_priv); |
ac840ae5 VS |
1679 | if (ret) |
1680 | DRM_ERROR("failed to re-enable GGTT\n"); | |
1681 | ||
f74ed08d ID |
1682 | intel_csr_ucode_resume(dev_priv); |
1683 | ||
bf9e8429 | 1684 | i915_gem_resume(dev_priv); |
9d49c0ef | 1685 | |
af6dc742 | 1686 | i915_restore_state(dev_priv); |
8090ba8c | 1687 | intel_pps_unlock_regs_wa(dev_priv); |
6f9f4b7a | 1688 | intel_opregion_setup(dev_priv); |
61caf87c | 1689 | |
c39055b0 | 1690 | intel_init_pch_refclk(dev_priv); |
1833b134 | 1691 | |
364aece0 PA |
1692 | /* |
1693 | * Interrupts have to be enabled before any batches are run. If not, the |
1694 | * GPU will hang. i915_gem_init_hw() will initiate batches to | |
1695 | * update/restore the context. | |
1696 | * | |
908764f6 ID |
1697 | * drm_mode_config_reset() needs AUX interrupts. |
1698 | * | |
364aece0 PA |
1699 | * Modeset enabling in intel_modeset_init_hw() also needs working |
1700 | * interrupts. | |
1701 | */ | |
1702 | intel_runtime_pm_enable_interrupts(dev_priv); | |
1703 | ||
908764f6 ID |
1704 | drm_mode_config_reset(dev); |
1705 | ||
d5818938 | 1706 | mutex_lock(&dev->struct_mutex); |
bf9e8429 | 1707 | if (i915_gem_init_hw(dev_priv)) { |
d5818938 | 1708 | DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); |
821ed7df | 1709 | i915_gem_set_wedged(dev_priv); |
d5818938 DV |
1710 | } |
1711 | mutex_unlock(&dev->struct_mutex); | |
226485e9 | 1712 | |
bf9e8429 | 1713 | intel_guc_resume(dev_priv); |
a1c41994 | 1714 | |
d5818938 | 1715 | intel_modeset_init_hw(dev); |
24576d23 | 1716 | |
d5818938 DV |
1717 | spin_lock_irq(&dev_priv->irq_lock); |
1718 | if (dev_priv->display.hpd_irq_setup) | |
91d14251 | 1719 | dev_priv->display.hpd_irq_setup(dev_priv); |
d5818938 | 1720 | spin_unlock_irq(&dev_priv->irq_lock); |
0e32b39c | 1721 | |
d5818938 | 1722 | intel_dp_mst_resume(dev); |
e7d6f7d7 | 1723 | |
a16b7658 L |
1724 | intel_display_resume(dev); |
1725 | ||
e0b70061 L |
1726 | drm_kms_helper_poll_enable(dev); |
1727 | ||
d5818938 DV |
1728 | /* |
1729 | * ... but also need to make sure that hotplug processing | |
1730 | * doesn't cause havoc. Like in the driver load code we don't | |
1731 | * bother with the tiny race here where we might lose hotplug |
1732 | * notifications. | |
1733 | */ |
1734 | intel_hpd_init(dev_priv); | |
1daed3fb | 1735 | |
03d92e47 | 1736 | intel_opregion_register(dev_priv); |
44834a67 | 1737 | |
82e3b8c1 | 1738 | intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); |
073f34d9 | 1739 | |
b8efb17b ZR |
1740 | mutex_lock(&dev_priv->modeset_restore_lock); |
1741 | dev_priv->modeset_restore = MODESET_DONE; | |
1742 | mutex_unlock(&dev_priv->modeset_restore_lock); | |
8a187455 | 1743 | |
6f9f4b7a | 1744 | intel_opregion_notify_adapter(dev_priv, PCI_D0); |
e5747e3a | 1745 | |
54b4f68f | 1746 | intel_autoenable_gt_powersave(dev_priv); |
ee6f280e | 1747 | |
1f814dac ID |
1748 | enable_rpm_wakeref_asserts(dev_priv); |
1749 | ||
074c6ada | 1750 | return 0; |
84b79f8d RW |
1751 | } |
1752 | ||
5e365c39 | 1753 | static int i915_drm_resume_early(struct drm_device *dev) |
84b79f8d | 1754 | { |
fac5e23e | 1755 | struct drm_i915_private *dev_priv = to_i915(dev); |
52a05c30 | 1756 | struct pci_dev *pdev = dev_priv->drm.pdev; |
44410cd0 | 1757 | int ret; |
36d61e67 | 1758 | |
76c4b250 ID |
1759 | /* |
1760 | * We have a resume ordering issue with the snd-hda driver also | |
1761 | * requiring our device to be powered up. Due to the lack of a |
1762 | * parent/child relationship we currently solve this with an early | |
1763 | * resume hook. | |
1764 | * | |
1765 | * FIXME: This should be solved with a special hdmi sink device or | |
1766 | * similar so that power domains can be employed. | |
1767 | */ | |
44410cd0 ID |
1768 | |
1769 | /* | |
1770 | * Note that we need to set the power state explicitly, since we | |
1771 | * powered off the device during freeze and the PCI core won't power | |
1772 | * it back up for us during thaw. Powering off the device during | |
1773 | * freeze is not a hard requirement though, and during the | |
1774 | * suspend/resume phases the PCI core makes sure we get here with the | |
1775 | * device powered on. So in case we change our freeze logic and keep | |
1776 | * the device powered we can also remove the following set power state | |
1777 | * call. | |
1778 | */ | |
52a05c30 | 1779 | ret = pci_set_power_state(pdev, PCI_D0); |
44410cd0 ID |
1780 | if (ret) { |
1781 | DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret); | |
1782 | goto out; | |
1783 | } | |
1784 | ||
1785 | /* | |
1786 | * Note that pci_enable_device() first enables any parent bridge | |
1787 | * device and only then sets the power state for this device. The | |
1788 | * bridge enabling is a nop though, since bridge devices are resumed | |
1789 | * first. The order of enabling power and enabling the device is | |
1790 | * imposed by the PCI core as described above, so here we preserve the | |
1791 | * same order for the freeze/thaw phases. | |
1792 | * | |
1793 | * TODO: eventually we should remove pci_disable_device() / | |
1794 | * pci_enable_device() from suspend/resume. Due to how they |
1795 | * depend on the device enable refcount we can't anyway depend on them | |
1796 | * disabling/enabling the device. | |
1797 | */ | |
52a05c30 | 1798 | if (pci_enable_device(pdev)) { |
bc87229f ID |
1799 | ret = -EIO; |
1800 | goto out; | |
1801 | } | |
84b79f8d | 1802 | |
52a05c30 | 1803 | pci_set_master(pdev); |
84b79f8d | 1804 | |
1f814dac ID |
1805 | disable_rpm_wakeref_asserts(dev_priv); |
1806 | ||
666a4537 | 1807 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
1a5df187 | 1808 | ret = vlv_resume_prepare(dev_priv, false); |
36d61e67 | 1809 | if (ret) |
ff0b187f DL |
1810 | DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", |
1811 | ret); | |
36d61e67 | 1812 | |
68f60946 | 1813 | intel_uncore_resume_early(dev_priv); |
efee833a | 1814 | |
b9fd799e | 1815 | if (IS_GEN9_LP(dev_priv)) { |
da2f41d1 ID |
1816 | if (!dev_priv->suspended_to_idle) |
1817 | gen9_sanitize_dc_state(dev_priv); | |
507e126e | 1818 | bxt_disable_dc9(dev_priv); |
da2f41d1 | 1819 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
a9a6b73a | 1820 | hsw_disable_pc8(dev_priv); |
da2f41d1 | 1821 | } |
efee833a | 1822 | |
dc97997a | 1823 | intel_uncore_sanitize(dev_priv); |
bc87229f | 1824 | |
b9fd799e | 1825 | if (IS_GEN9_LP(dev_priv) || |
a7c8125f | 1826 | !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) |
bc87229f ID |
1827 | intel_power_domains_init_hw(dev_priv, true); |
1828 | ||
24145517 CW |
1829 | i915_gem_sanitize(dev_priv); |
1830 | ||
6e35e8ab ID |
1831 | enable_rpm_wakeref_asserts(dev_priv); |
1832 | ||
bc87229f ID |
1833 | out: |
1834 | dev_priv->suspended_to_idle = false; | |
36d61e67 ID |
1835 | |
1836 | return ret; | |
76c4b250 ID |
1837 | } |
1838 | ||
7f26cb88 | 1839 | static int i915_resume_switcheroo(struct drm_device *dev) |
76c4b250 | 1840 | { |
50a0072f | 1841 | int ret; |
76c4b250 | 1842 | |
097dd837 ID |
1843 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
1844 | return 0; | |
1845 | ||
5e365c39 | 1846 | ret = i915_drm_resume_early(dev); |
50a0072f ID |
1847 | if (ret) |
1848 | return ret; | |
1849 | ||
5a17514e ID |
1850 | return i915_drm_resume(dev); |
1851 | } | |
1852 | ||
11ed50ec | 1853 | /** |
f3953dcb | 1854 | * i915_reset - reset chip after a hang |
535275d3 CW |
1855 | * @i915: #drm_i915_private to reset |
1856 | * @flags: Instructions | |
11ed50ec | 1857 | * |
780f262a CW |
1858 | * Reset the chip. Useful if a hang is detected. Marks the device as wedged |
1859 | * on failure. | |
11ed50ec | 1860 | * |
221fe799 CW |
1861 | * Caller must hold the struct_mutex. |
1862 | * | |
11ed50ec BG |
1863 | * Procedure is fairly simple: |
1864 | * - reset the chip using the reset reg | |
1865 | * - re-init context state | |
1866 | * - re-init hardware status page | |
1867 | * - re-init ring buffer | |
1868 | * - re-init interrupt state | |
1869 | * - re-init display | |
1870 | */ | |
535275d3 | 1871 | void i915_reset(struct drm_i915_private *i915, unsigned int flags) |
11ed50ec | 1872 | { |
535275d3 | 1873 | struct i915_gpu_error *error = &i915->gpu_error; |
0573ed4a | 1874 | int ret; |
11ed50ec | 1875 | |
535275d3 | 1876 | lockdep_assert_held(&i915->drm.struct_mutex); |
8c185eca | 1877 | GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags)); |
221fe799 | 1878 | |
8c185eca | 1879 | if (!test_bit(I915_RESET_HANDOFF, &error->flags)) |
780f262a | 1880 | return; |
11ed50ec | 1881 | |
d98c52cf | 1882 | /* Clear any previous failed attempts at recovery. Time to try again. */ |
535275d3 | 1883 | if (!i915_gem_unset_wedged(i915)) |
2e8f9d32 CW |
1884 | goto wakeup; |
1885 | ||
535275d3 CW |
1886 | if (!(flags & I915_RESET_QUIET)) |
1887 | dev_notice(i915->drm.dev, "Resetting chip after gpu hang\n"); | |
8af29b0c | 1888 | error->reset_count++; |
d98c52cf | 1889 | |
535275d3 CW |
1890 | disable_irq(i915->drm.irq); |
1891 | ret = i915_gem_reset_prepare(i915); | |
0e178aef CW |
1892 | if (ret) { |
1893 | DRM_ERROR("GPU recovery failed\n"); | |
535275d3 | 1894 | intel_gpu_reset(i915, ALL_ENGINES); |
0e178aef CW |
1895 | goto error; |
1896 | } | |
9e60ab03 | 1897 | |
535275d3 | 1898 | ret = intel_gpu_reset(i915, ALL_ENGINES); |
0573ed4a | 1899 | if (ret) { |
804e59a8 CW |
1900 | if (ret != -ENODEV) |
1901 | DRM_ERROR("Failed to reset chip: %i\n", ret); | |
1902 | else | |
1903 | DRM_DEBUG_DRIVER("GPU reset disabled\n"); | |
d98c52cf | 1904 | goto error; |
11ed50ec BG |
1905 | } |
1906 | ||
535275d3 CW |
1907 | i915_gem_reset(i915); |
1908 | intel_overlay_reset(i915); | |
1362b776 | 1909 | |
11ed50ec BG |
1910 | /* Ok, now get things going again... */ |
1911 | ||
1912 | /* | |
1913 | * Everything depends on having the GTT running, so we need to start | |
0db8c961 CW |
1914 | * there. |
1915 | */ | |
1916 | ret = i915_ggtt_enable_hw(i915); | |
1917 | if (ret) { | |
1918 | DRM_ERROR("Failed to re-enable GGTT following reset %d\n", ret); | |
1919 | goto error; | |
1920 | } | |
1921 | ||
1922 | /* | |
11ed50ec BG |
1923 | * Next we need to restore the context, but we don't use those |
1924 | * yet either... | |
1925 | * | |
1926 | * Ring buffer needs to be re-initialized in the KMS case, or if X | |
1927 | * was running at the time of the reset (i.e. we weren't VT | |
1928 | * switched away). | |
1929 | */ | |
535275d3 | 1930 | ret = i915_gem_init_hw(i915); |
33d30a9c DV |
1931 | if (ret) { |
1932 | DRM_ERROR("Failed hw init on reset %d\n", ret); | |
d98c52cf | 1933 | goto error; |
11ed50ec BG |
1934 | } |
1935 | ||
535275d3 | 1936 | i915_queue_hangcheck(i915); |
c2a126a4 | 1937 | |
2e8f9d32 | 1938 | finish: |
535275d3 CW |
1939 | i915_gem_reset_finish(i915); |
1940 | enable_irq(i915->drm.irq); | |
8c185eca | 1941 | |
2e8f9d32 | 1942 | wakeup: |
8c185eca CW |
1943 | clear_bit(I915_RESET_HANDOFF, &error->flags); |
1944 | wake_up_bit(&error->flags, I915_RESET_HANDOFF); | |
780f262a | 1945 | return; |
d98c52cf CW |
1946 | |
1947 | error: | |
535275d3 CW |
1948 | i915_gem_set_wedged(i915); |
1949 | i915_gem_retire_requests(i915); | |
2e8f9d32 | 1950 | goto finish; |
11ed50ec BG |
1951 | } |
1952 | ||
142bc7d9 MT |
1953 | /** |
1954 | * i915_reset_engine - reset GPU engine to recover from a hang | |
1955 | * @engine: engine to reset | |
535275d3 | 1956 | * @flags: options |
142bc7d9 MT |
1957 | * |
1958 | * Reset a specific GPU engine. Useful if a hang is detected. | |
1959 | * Returns zero on successful reset or otherwise an error code. | |
a1ef70e1 MT |
1960 | * |
1961 | * Procedure is: | |
1962 | * - identify the request that caused the hang and drop it |
1963 | * - reset engine (which will force the engine to idle) | |
1964 | * - re-init/configure engine | |
142bc7d9 | 1965 | */ |
535275d3 | 1966 | int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags) |
142bc7d9 | 1967 | { |
a1ef70e1 MT |
1968 | struct i915_gpu_error *error = &engine->i915->gpu_error; |
1969 | struct drm_i915_gem_request *active_request; | |
1970 | int ret; | |
1971 | ||
1972 | GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); | |
1973 | ||
535275d3 CW |
1974 | if (!(flags & I915_RESET_QUIET)) { |
1975 | dev_notice(engine->i915->drm.dev, | |
1976 | "Resetting %s after gpu hang\n", engine->name); | |
1977 | } | |
7367612f | 1978 | error->reset_engine_count[engine->id]++; |
a1ef70e1 MT |
1979 | |
1980 | active_request = i915_gem_reset_prepare_engine(engine); | |
1981 | if (IS_ERR(active_request)) { | |
1982 | DRM_DEBUG_DRIVER("Previous reset failed, promote to full reset\n"); | |
1983 | ret = PTR_ERR(active_request); | |
1984 | goto out; | |
1985 | } | |
1986 | ||
b4f3e163 | 1987 | ret = intel_gpu_reset(engine->i915, intel_engine_flag(engine)); |
0364cd19 CW |
1988 | if (ret) { |
1989 | /* If we fail here, we expect to fallback to a global reset */ | |
1990 | DRM_DEBUG_DRIVER("Failed to reset %s, ret=%d\n", | |
1991 | engine->name, ret); | |
1992 | goto out; | |
1993 | } | |
b4f3e163 | 1994 | |
a1ef70e1 MT |
1995 | /* |
1996 | * The request that caused the hang is stuck on elsp, we know the | |
1997 | * active request and can drop it, adjust head to skip the offending | |
1998 | * request to resume executing remaining requests in the queue. | |
1999 | */ | |
2000 | i915_gem_reset_engine(engine, active_request); | |
2001 | ||
a1ef70e1 MT |
2002 | /* |
2003 | * The engine and its registers (and workarounds in case of render) | |
2004 | * have been reset to their default values. Follow the init_ring | |
2005 | * process to program RING_MODE, HWSP and re-enable submission. | |
2006 | */ | |
2007 | ret = engine->init_hw(engine); | |
702c8f8e MT |
2008 | if (ret) |
2009 | goto out; | |
a1ef70e1 MT |
2010 | |
2011 | out: | |
0364cd19 | 2012 | i915_gem_reset_finish_engine(engine); |
a1ef70e1 | 2013 | return ret; |
142bc7d9 MT |
2014 | } |
2015 | ||
c49d13ee | 2016 | static int i915_pm_suspend(struct device *kdev) |
112b715e | 2017 | { |
c49d13ee DW |
2018 | struct pci_dev *pdev = to_pci_dev(kdev); |
2019 | struct drm_device *dev = pci_get_drvdata(pdev); | |
112b715e | 2020 | |
c49d13ee DW |
2021 | if (!dev) { |
2022 | dev_err(kdev, "DRM not initialized, aborting suspend.\n"); | |
84b79f8d RW |
2023 | return -ENODEV; |
2024 | } | |
112b715e | 2025 | |
c49d13ee | 2026 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
5bcf719b DA |
2027 | return 0; |
2028 | ||
c49d13ee | 2029 | return i915_drm_suspend(dev); |
76c4b250 ID |
2030 | } |
2031 | ||
c49d13ee | 2032 | static int i915_pm_suspend_late(struct device *kdev) |
76c4b250 | 2033 | { |
c49d13ee | 2034 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
76c4b250 ID |
2035 | |
2036 | /* | |
c965d995 | 2037 | * We have a suspend ordering issue with the snd-hda driver also |
76c4b250 ID |
2038 | * requiring our device to be powered up. Due to the lack of a |
2039 | * parent/child relationship we currently solve this with a late |
2040 | * suspend hook. | |
2041 | * | |
2042 | * FIXME: This should be solved with a special hdmi sink device or | |
2043 | * similar so that power domains can be employed. | |
2044 | */ | |
c49d13ee | 2045 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
76c4b250 | 2046 | return 0; |
112b715e | 2047 | |
c49d13ee | 2048 | return i915_drm_suspend_late(dev, false); |
ab3be73f ID |
2049 | } |
2050 | ||
c49d13ee | 2051 | static int i915_pm_poweroff_late(struct device *kdev) |
ab3be73f | 2052 | { |
c49d13ee | 2053 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
ab3be73f | 2054 | |
c49d13ee | 2055 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
ab3be73f ID |
2056 | return 0; |
2057 | ||
c49d13ee | 2058 | return i915_drm_suspend_late(dev, true); |
cbda12d7 ZW |
2059 | } |
2060 | ||
c49d13ee | 2061 | static int i915_pm_resume_early(struct device *kdev) |
76c4b250 | 2062 | { |
c49d13ee | 2063 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
76c4b250 | 2064 | |
c49d13ee | 2065 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
097dd837 ID |
2066 | return 0; |
2067 | ||
c49d13ee | 2068 | return i915_drm_resume_early(dev); |
76c4b250 ID |
2069 | } |
2070 | ||
c49d13ee | 2071 | static int i915_pm_resume(struct device *kdev) |
cbda12d7 | 2072 | { |
c49d13ee | 2073 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
84b79f8d | 2074 | |
c49d13ee | 2075 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
097dd837 ID |
2076 | return 0; |
2077 | ||
c49d13ee | 2078 | return i915_drm_resume(dev); |
cbda12d7 ZW |
2079 | } |
2080 | ||
1f19ac2a | 2081 | /* freeze: before creating the hibernation_image */ |
c49d13ee | 2082 | static int i915_pm_freeze(struct device *kdev) |
1f19ac2a | 2083 | { |
dd9f31c7 | 2084 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
6a800eab CW |
2085 | int ret; |
2086 | ||
dd9f31c7 ID |
2087 | if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) { |
2088 | ret = i915_drm_suspend(dev); | |
2089 | if (ret) | |
2090 | return ret; | |
2091 | } | |
6a800eab CW |
2092 | |
2093 | ret = i915_gem_freeze(kdev_to_i915(kdev)); | |
2094 | if (ret) | |
2095 | return ret; | |
2096 | ||
2097 | return 0; | |
1f19ac2a CW |
2098 | } |
2099 | ||
c49d13ee | 2100 | static int i915_pm_freeze_late(struct device *kdev) |
1f19ac2a | 2101 | { |
dd9f31c7 | 2102 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
461fb99c CW |
2103 | int ret; |
2104 | ||
dd9f31c7 ID |
2105 | if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) { |
2106 | ret = i915_drm_suspend_late(dev, true); | |
2107 | if (ret) | |
2108 | return ret; | |
2109 | } | |
461fb99c | 2110 | |
c49d13ee | 2111 | ret = i915_gem_freeze_late(kdev_to_i915(kdev)); |
461fb99c CW |
2112 | if (ret) |
2113 | return ret; | |
2114 | ||
2115 | return 0; | |
1f19ac2a CW |
2116 | } |
2117 | ||
2118 | /* thaw: called after creating the hibernation image, but before turning off. */ | |
c49d13ee | 2119 | static int i915_pm_thaw_early(struct device *kdev) |
1f19ac2a | 2120 | { |
c49d13ee | 2121 | return i915_pm_resume_early(kdev); |
1f19ac2a CW |
2122 | } |
2123 | ||
c49d13ee | 2124 | static int i915_pm_thaw(struct device *kdev) |
1f19ac2a | 2125 | { |
c49d13ee | 2126 | return i915_pm_resume(kdev); |
1f19ac2a CW |
2127 | } |
2128 | ||
2129 | /* restore: called after loading the hibernation image. */ | |
c49d13ee | 2130 | static int i915_pm_restore_early(struct device *kdev) |
1f19ac2a | 2131 | { |
c49d13ee | 2132 | return i915_pm_resume_early(kdev); |
1f19ac2a CW |
2133 | } |
2134 | ||
c49d13ee | 2135 | static int i915_pm_restore(struct device *kdev) |
1f19ac2a | 2136 | { |
c49d13ee | 2137 | return i915_pm_resume(kdev); |
1f19ac2a CW |
2138 | } |
2139 | ||
ddeea5b0 ID |
2140 | /* |
2141 | * Save all Gunit registers that may be lost after a D3 and a subsequent | |
2142 | * S0i[R123] transition. The list of registers needing a save/restore is | |
2143 | * defined in the VLV2_S0IXRegs document. This document marks all Gunit |
2144 | * registers in the following way: | |
2145 | * - Driver: saved/restored by the driver | |
2146 | * - Punit : saved/restored by the Punit firmware | |
2147 | * - No, w/o marking: no need to save/restore, since the register is R/O or | |
2148 | * used internally by the HW in a way that doesn't depend on |
2149 | * keeping the content across a suspend/resume. | |
2150 | * - Debug : used for debugging | |
2151 | * | |
2152 | * We save/restore all registers marked with 'Driver', with the following | |
2153 | * exceptions: | |
2154 | * - Registers out of use, including also registers marked with 'Debug'. | |
2155 | * These have no effect on the driver's operation, so we don't save/restore | |
2156 | * them to reduce the overhead. | |
2157 | * - Registers that are fully setup by an initialization function called from | |
2158 | * the resume path. For example many clock gating and RPS/RC6 registers. | |
2159 | * - Registers that provide the right functionality with their reset defaults. | |
2160 | * | |
2161 | * TODO: Except for registers that based on the above 3 criteria can be safely | |
2162 | * ignored, we save/restore all others, practically treating the HW context as | |
2163 | * a black-box for the driver. Further investigation is needed to reduce the | |
2164 | * saved/restored registers even further, by following the same 3 criteria. | |
2165 | */ | |
2166 | static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) | |
2167 | { | |
2168 | struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; | |
2169 | int i; | |
2170 | ||
2171 | /* GAM 0x4000-0x4770 */ | |
2172 | s->wr_watermark = I915_READ(GEN7_WR_WATERMARK); | |
2173 | s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL); | |
2174 | s->arb_mode = I915_READ(ARB_MODE); | |
2175 | s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0); | |
2176 | s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1); | |
2177 | ||
2178 | for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) | |
22dfe79f | 2179 | s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i)); |
ddeea5b0 ID |
2180 | |
2181 | s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); | |
b5f1c97f | 2182 | s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT); |
ddeea5b0 ID |
2183 | |
2184 | s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7); | |
2185 | s->ecochk = I915_READ(GAM_ECOCHK); | |
2186 | s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7); | |
2187 | s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7); | |
2188 | ||
2189 | s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR); | |
2190 | ||
2191 | /* MBC 0x9024-0x91D0, 0x8500 */ | |
2192 | s->g3dctl = I915_READ(VLV_G3DCTL); | |
2193 | s->gsckgctl = I915_READ(VLV_GSCKGCTL); | |
2194 | s->mbctl = I915_READ(GEN6_MBCTL); | |
2195 | ||
2196 | /* GCP 0x9400-0x9424, 0x8100-0x810C */ | |
2197 | s->ucgctl1 = I915_READ(GEN6_UCGCTL1); | |
2198 | s->ucgctl3 = I915_READ(GEN6_UCGCTL3); | |
2199 | s->rcgctl1 = I915_READ(GEN6_RCGCTL1); | |
2200 | s->rcgctl2 = I915_READ(GEN6_RCGCTL2); | |
2201 | s->rstctl = I915_READ(GEN6_RSTCTL); | |
2202 | s->misccpctl = I915_READ(GEN7_MISCCPCTL); | |
2203 | ||
2204 | /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ | |
2205 | s->gfxpause = I915_READ(GEN6_GFXPAUSE); | |
2206 | s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC); | |
2207 | s->rpdeuc = I915_READ(GEN6_RPDEUC); | |
2208 | s->ecobus = I915_READ(ECOBUS); | |
2209 | s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL); | |
2210 | s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT); | |
2211 | s->rp_deucsw = I915_READ(GEN6_RPDEUCSW); | |
2212 | s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR); | |
2213 | s->rcedata = I915_READ(VLV_RCEDATA); | |
2214 | s->spare2gh = I915_READ(VLV_SPAREG2H); | |
2215 | ||
2216 | /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ | |
2217 | s->gt_imr = I915_READ(GTIMR); | |
2218 | s->gt_ier = I915_READ(GTIER); | |
2219 | s->pm_imr = I915_READ(GEN6_PMIMR); | |
2220 | s->pm_ier = I915_READ(GEN6_PMIER); | |
2221 | ||
2222 | for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) | |
22dfe79f | 2223 | s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i)); |
ddeea5b0 ID |
2224 | |
2225 | /* GT SA CZ domain, 0x100000-0x138124 */ | |
2226 | s->tilectl = I915_READ(TILECTL); | |
2227 | s->gt_fifoctl = I915_READ(GTFIFOCTL); | |
2228 | s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL); | |
2229 | s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG); | |
2230 | s->pmwgicz = I915_READ(VLV_PMWGICZ); | |
2231 | ||
2232 | /* Gunit-Display CZ domain, 0x182028-0x1821CF */ | |
2233 | s->gu_ctl0 = I915_READ(VLV_GU_CTL0); | |
2234 | s->gu_ctl1 = I915_READ(VLV_GU_CTL1); | |
9c25210f | 2235 | s->pcbr = I915_READ(VLV_PCBR); |
ddeea5b0 ID |
2236 | s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2); |
2237 | ||
2238 | /* | |
2239 | * Not saving any of: | |
2240 | * DFT, 0x9800-0x9EC0 | |
2241 | * SARB, 0xB000-0xB1FC | |
2242 | * GAC, 0x5208-0x524C, 0x14000-0x14C000 | |
2243 | * PCI CFG | |
2244 | */ | |
2245 | } | |
2246 | ||
2247 | static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) | |
2248 | { | |
2249 | struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; | |
2250 | u32 val; | |
2251 | int i; | |
2252 | ||
2253 | /* GAM 0x4000-0x4770 */ | |
2254 | I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark); | |
2255 | I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl); | |
2256 | I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16)); | |
2257 | I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0); | |
2258 | I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1); | |
2259 | ||
2260 | for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) | |
22dfe79f | 2261 | I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]); |
ddeea5b0 ID |
2262 | |
2263 | I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); | |
b5f1c97f | 2264 | I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count); |
ddeea5b0 ID |
2265 | |
2266 | I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp); | |
2267 | I915_WRITE(GAM_ECOCHK, s->ecochk); | |
2268 | I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp); | |
2269 | I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp); | |
2270 | ||
2271 | I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr); | |
2272 | ||
2273 | /* MBC 0x9024-0x91D0, 0x8500 */ | |
2274 | I915_WRITE(VLV_G3DCTL, s->g3dctl); | |
2275 | I915_WRITE(VLV_GSCKGCTL, s->gsckgctl); | |
2276 | I915_WRITE(GEN6_MBCTL, s->mbctl); | |
2277 | ||
2278 | /* GCP 0x9400-0x9424, 0x8100-0x810C */ | |
2279 | I915_WRITE(GEN6_UCGCTL1, s->ucgctl1); | |
2280 | I915_WRITE(GEN6_UCGCTL3, s->ucgctl3); | |
2281 | I915_WRITE(GEN6_RCGCTL1, s->rcgctl1); | |
2282 | I915_WRITE(GEN6_RCGCTL2, s->rcgctl2); | |
2283 | I915_WRITE(GEN6_RSTCTL, s->rstctl); | |
2284 | I915_WRITE(GEN7_MISCCPCTL, s->misccpctl); | |
2285 | ||
2286 | /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ | |
2287 | I915_WRITE(GEN6_GFXPAUSE, s->gfxpause); | |
2288 | I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc); | |
2289 | I915_WRITE(GEN6_RPDEUC, s->rpdeuc); | |
2290 | I915_WRITE(ECOBUS, s->ecobus); | |
2291 | I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl); | |
2292 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout); | |
2293 | I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw); | |
2294 | I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr); | |
2295 | I915_WRITE(VLV_RCEDATA, s->rcedata); | |
2296 | I915_WRITE(VLV_SPAREG2H, s->spare2gh); | |
2297 | ||
2298 | /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ | |
2299 | I915_WRITE(GTIMR, s->gt_imr); | |
2300 | I915_WRITE(GTIER, s->gt_ier); | |
2301 | I915_WRITE(GEN6_PMIMR, s->pm_imr); | |
2302 | I915_WRITE(GEN6_PMIER, s->pm_ier); | |
2303 | ||
2304 | for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) | |
22dfe79f | 2305 | I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]); |
ddeea5b0 ID |
2306 | |
2307 | /* GT SA CZ domain, 0x100000-0x138124 */ | |
2308 | I915_WRITE(TILECTL, s->tilectl); | |
2309 | I915_WRITE(GTFIFOCTL, s->gt_fifoctl); | |
2310 | /* | |
2311 | * Preserve the GT allow wake and GFX force clock bit, they are not to |
2312 | * be restored, as they are used to control the s0ix suspend/resume |
2313 | * sequence by the caller. | |
2314 | */ | |
2315 | val = I915_READ(VLV_GTLC_WAKE_CTRL); | |
2316 | val &= VLV_GTLC_ALLOWWAKEREQ; | |
2317 | val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ; | |
2318 | I915_WRITE(VLV_GTLC_WAKE_CTRL, val); | |
2319 | ||
2320 | val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); | |
2321 | val &= VLV_GFX_CLK_FORCE_ON_BIT; | |
2322 | val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT; | |
2323 | I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); | |
2324 | ||
2325 | I915_WRITE(VLV_PMWGICZ, s->pmwgicz); | |
2326 | ||
2327 | /* Gunit-Display CZ domain, 0x182028-0x1821CF */ | |
2328 | I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); | |
2329 | I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); | |
9c25210f | 2330 | I915_WRITE(VLV_PCBR, s->pcbr); |
ddeea5b0 ID |
2331 | I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); |
2332 | } | |
2333 | ||
3dd14c04 CW |
2334 | static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv, |
2335 | u32 mask, u32 val) | |
2336 | { | |
2337 | /* The HW does not like us polling for PW_STATUS frequently, so | |
2338 | * use the sleeping loop rather than risk the busy spin within | |
2339 | * intel_wait_for_register(). | |
2340 | * | |
2341 | * Transitioning between RC6 states should be at most 2ms (see | |
2342 | * valleyview_enable_rps) so use a 3ms timeout. | |
2343 | */ | |
2344 | return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val, | |
2345 | 3); | |
2346 | } | |
2347 | ||
650ad970 ID |
2348 | int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) |
2349 | { | |
2350 | u32 val; | |
2351 | int err; | |
2352 | ||
650ad970 ID |
2353 | val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); |
2354 | val &= ~VLV_GFX_CLK_FORCE_ON_BIT; | |
2355 | if (force_on) | |
2356 | val |= VLV_GFX_CLK_FORCE_ON_BIT; | |
2357 | I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); | |
2358 | ||
2359 | if (!force_on) | |
2360 | return 0; | |
2361 | ||
c6ddc5f3 CW |
2362 | err = intel_wait_for_register(dev_priv, |
2363 | VLV_GTLC_SURVIVABILITY_REG, | |
2364 | VLV_GFX_CLK_STATUS_BIT, | |
2365 | VLV_GFX_CLK_STATUS_BIT, | |
2366 | 20); | |
650ad970 ID |
2367 | if (err) |
2368 | DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n", | |
2369 | I915_READ(VLV_GTLC_SURVIVABILITY_REG)); | |
2370 | ||
2371 | return err; | |
650ad970 ID |
2372 | } |
2373 | ||
ddeea5b0 ID |
2374 | static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) |
2375 | { | |
3dd14c04 | 2376 | u32 mask; |
ddeea5b0 | 2377 | u32 val; |
3dd14c04 | 2378 | int err; |
ddeea5b0 ID |
2379 | |
2380 | val = I915_READ(VLV_GTLC_WAKE_CTRL); | |
2381 | val &= ~VLV_GTLC_ALLOWWAKEREQ; | |
2382 | if (allow) | |
2383 | val |= VLV_GTLC_ALLOWWAKEREQ; | |
2384 | I915_WRITE(VLV_GTLC_WAKE_CTRL, val); | |
2385 | POSTING_READ(VLV_GTLC_WAKE_CTRL); | |
2386 | ||
3dd14c04 CW |
2387 | mask = VLV_GTLC_ALLOWWAKEACK; |
2388 | val = allow ? mask : 0; | |
2389 | ||
2390 | err = vlv_wait_for_pw_status(dev_priv, mask, val); | |
ddeea5b0 ID |
2391 | if (err) |
2392 | DRM_ERROR("timeout disabling GT waking\n"); | |
b2736695 | 2393 | |
ddeea5b0 | 2394 | return err; |
ddeea5b0 ID |
2395 | } |
2396 | ||
3dd14c04 CW |
2397 | static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, |
2398 | bool wait_for_on) | |
ddeea5b0 ID |
2399 | { |
2400 | u32 mask; | |
2401 | u32 val; | |
ddeea5b0 ID |
2402 | |
2403 | mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; | |
2404 | val = wait_for_on ? mask : 0; | |
ddeea5b0 ID |
2405 | |
2406 | /* | |
2407 | * RC6 transitioning can be delayed up to 2 msec (see | |
2408 | * valleyview_enable_rps), use 3 msec for safety. | |
2409 | */ | |
3dd14c04 | 2410 | if (vlv_wait_for_pw_status(dev_priv, mask, val)) |
ddeea5b0 | 2411 | DRM_ERROR("timeout waiting for GT wells to go %s\n", |
87ad3212 | 2412 | onoff(wait_for_on)); |
ddeea5b0 ID |
2413 | } |
2414 | ||
2415 | static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) | |
2416 | { | |
2417 | if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR)) | |
2418 | return; | |
2419 | ||
6fa283b0 | 2420 | DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n"); |
ddeea5b0 ID |
2421 | I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); |
2422 | } | |
2423 | ||
ebc32824 | 2424 | static int vlv_suspend_complete(struct drm_i915_private *dev_priv) |
ddeea5b0 ID |
2425 | { |
2426 | u32 mask; | |
2427 | int err; | |
2428 | ||
2429 | /* | |
2430 | * Bspec defines the following GT wells-on flags as debug only, so |
2431 | * don't treat them as hard failures. | |
2432 | */ | |
3dd14c04 | 2433 | vlv_wait_for_gt_wells(dev_priv, false); |
ddeea5b0 ID |
2434 | |
2435 | mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS; | |
2436 | WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask); | |
2437 | ||
2438 | vlv_check_no_gt_access(dev_priv); | |
2439 | ||
2440 | err = vlv_force_gfx_clock(dev_priv, true); | |
2441 | if (err) | |
2442 | goto err1; | |
2443 | ||
2444 | err = vlv_allow_gt_wake(dev_priv, false); | |
2445 | if (err) | |
2446 | goto err2; | |
98711167 | 2447 | |
2d1fe073 | 2448 | if (!IS_CHERRYVIEW(dev_priv)) |
98711167 | 2449 | vlv_save_gunit_s0ix_state(dev_priv); |
ddeea5b0 ID |
2450 | |
2451 | err = vlv_force_gfx_clock(dev_priv, false); | |
2452 | if (err) | |
2453 | goto err2; | |
2454 | ||
2455 | return 0; | |
2456 | ||
2457 | err2: | |
2458 | /* For safety always re-enable waking and disable gfx clock forcing */ | |
2459 | vlv_allow_gt_wake(dev_priv, true); | |
2460 | err1: | |
2461 | vlv_force_gfx_clock(dev_priv, false); | |
2462 | ||
2463 | return err; | |
2464 | } | |
2465 | ||
016970be SK |
2466 | static int vlv_resume_prepare(struct drm_i915_private *dev_priv, |
2467 | bool rpm_resume) | |
ddeea5b0 | 2468 | { |
ddeea5b0 ID |
2469 | int err; |
2470 | int ret; | |
2471 | ||
2472 | /* | |
2473 | * If any of the steps fail, just try to continue; that's the best we |
2474 | * can do at this point. Return the first error code (which will also | |
2475 | * leave RPM permanently disabled). | |
2476 | */ | |
2477 | ret = vlv_force_gfx_clock(dev_priv, true); | |
2478 | ||
2d1fe073 | 2479 | if (!IS_CHERRYVIEW(dev_priv)) |
98711167 | 2480 | vlv_restore_gunit_s0ix_state(dev_priv); |
ddeea5b0 ID |
2481 | |
2482 | err = vlv_allow_gt_wake(dev_priv, true); | |
2483 | if (!ret) | |
2484 | ret = err; | |
2485 | ||
2486 | err = vlv_force_gfx_clock(dev_priv, false); | |
2487 | if (!ret) | |
2488 | ret = err; | |
2489 | ||
2490 | vlv_check_no_gt_access(dev_priv); | |
2491 | ||
7c108fd8 | 2492 | if (rpm_resume) |
46f16e63 | 2493 | intel_init_clock_gating(dev_priv); |
ddeea5b0 ID |
2494 | |
2495 | return ret; | |
2496 | } | |
2497 | ||
c49d13ee | 2498 | static int intel_runtime_suspend(struct device *kdev) |
8a187455 | 2499 | { |
c49d13ee | 2500 | struct pci_dev *pdev = to_pci_dev(kdev); |
8a187455 | 2501 | struct drm_device *dev = pci_get_drvdata(pdev); |
fac5e23e | 2502 | struct drm_i915_private *dev_priv = to_i915(dev); |
0ab9cfeb | 2503 | int ret; |
8a187455 | 2504 | |
771decb0 | 2505 | if (WARN_ON_ONCE(!(dev_priv->gt_pm.rps.enabled && intel_rc6_enabled()))) |
c6df39b5 ID |
2506 | return -ENODEV; |
2507 | ||
6772ffe0 | 2508 | if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv))) |
604effb7 ID |
2509 | return -ENODEV; |
2510 | ||
8a187455 PZ |
2511 | DRM_DEBUG_KMS("Suspending device\n"); |
2512 | ||
1f814dac ID |
2513 | disable_rpm_wakeref_asserts(dev_priv); |
2514 | ||
d6102977 ID |
2515 | /* |
2516 | * We are safe here against re-faults, since the fault handler takes | |
2517 | * an RPM reference. | |
2518 | */ | |
7c108fd8 | 2519 | i915_gem_runtime_suspend(dev_priv); |
d6102977 | 2520 | |
bf9e8429 | 2521 | intel_guc_suspend(dev_priv); |
a1c41994 | 2522 | |
2eb5252e | 2523 | intel_runtime_pm_disable_interrupts(dev_priv); |
b5478bcd | 2524 | |
507e126e | 2525 | ret = 0; |
b9fd799e | 2526 | if (IS_GEN9_LP(dev_priv)) { |
507e126e ID |
2527 | bxt_display_core_uninit(dev_priv); |
2528 | bxt_enable_dc9(dev_priv); | |
2529 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { | |
2530 | hsw_enable_pc8(dev_priv); | |
2531 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | |
2532 | ret = vlv_suspend_complete(dev_priv); | |
2533 | } | |
2534 | ||
0ab9cfeb ID |
2535 | if (ret) { |
2536 | DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); | |
b963291c | 2537 | intel_runtime_pm_enable_interrupts(dev_priv); |
0ab9cfeb | 2538 | |
1f814dac ID |
2539 | enable_rpm_wakeref_asserts(dev_priv); |
2540 | ||
0ab9cfeb ID |
2541 | return ret; |
2542 | } | |
a8a8bd54 | 2543 | |
68f60946 | 2544 | intel_uncore_suspend(dev_priv); |
1f814dac ID |
2545 | |
2546 | enable_rpm_wakeref_asserts(dev_priv); | |
ad1443f0 | 2547 | WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count)); |
55ec45c2 | 2548 | |
bc3b9346 | 2549 | if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv)) |
55ec45c2 MK |
2550 | DRM_ERROR("Unclaimed access detected prior to suspending\n"); |
2551 | ||
ad1443f0 | 2552 | dev_priv->runtime_pm.suspended = true; |
1fb2362b KCA |
2553 | |
2554 | /* | |
c8a0bd42 PZ |
2555 | * FIXME: We really should find a document that references the arguments |
2556 | * used below! | |
1fb2362b | 2557 | */ |
6f9f4b7a | 2558 | if (IS_BROADWELL(dev_priv)) { |
d37ae19a PZ |
2559 | /* |
2560 | * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop | |
2561 | * being detected, and the call we do at intel_runtime_resume() | |
2562 | * won't be able to restore them. Since PCI_D3hot matches the | |
2563 | * actual specification and appears to be working, use it. | |
2564 | */ | |
6f9f4b7a | 2565 | intel_opregion_notify_adapter(dev_priv, PCI_D3hot); |
d37ae19a | 2566 | } else { |
c8a0bd42 PZ |
2567 | /* |
2568 | * Current versions of firmware which depend on this opregion |
2569 | * notification have repurposed the D1 definition to mean | |
2570 | * "runtime suspended" vs. what you would normally expect (D3) | |
2571 | * to distinguish it from notifications that might be sent via | |
2572 | * the suspend path. | |
2573 | */ | |
6f9f4b7a | 2574 | intel_opregion_notify_adapter(dev_priv, PCI_D1); |
c8a0bd42 | 2575 | } |
8a187455 | 2576 | |
59bad947 | 2577 | assert_forcewakes_inactive(dev_priv); |
dc9fb09c | 2578 | |
21d6e0bd | 2579 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
19625e85 L |
2580 | intel_hpd_poll_init(dev_priv); |
2581 | ||
a8a8bd54 | 2582 | DRM_DEBUG_KMS("Device suspended\n"); |
8a187455 PZ |
2583 | return 0; |
2584 | } | |
2585 | ||
c49d13ee | 2586 | static int intel_runtime_resume(struct device *kdev) |
8a187455 | 2587 | { |
c49d13ee | 2588 | struct pci_dev *pdev = to_pci_dev(kdev); |
8a187455 | 2589 | struct drm_device *dev = pci_get_drvdata(pdev); |
fac5e23e | 2590 | struct drm_i915_private *dev_priv = to_i915(dev); |
1a5df187 | 2591 | int ret = 0; |
8a187455 | 2592 | |
6772ffe0 | 2593 | if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv))) |
604effb7 | 2594 | return -ENODEV; |
8a187455 PZ |
2595 | |
2596 | DRM_DEBUG_KMS("Resuming device\n"); | |
2597 | ||
ad1443f0 | 2598 | WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count)); |
1f814dac ID |
2599 | disable_rpm_wakeref_asserts(dev_priv); |
2600 | ||
6f9f4b7a | 2601 | intel_opregion_notify_adapter(dev_priv, PCI_D0); |
ad1443f0 | 2602 | dev_priv->runtime_pm.suspended = false; |
55ec45c2 MK |
2603 | if (intel_uncore_unclaimed_mmio(dev_priv)) |
2604 | DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); | |
8a187455 | 2605 | |
bf9e8429 | 2606 | intel_guc_resume(dev_priv); |
a1c41994 | 2607 | |
b9fd799e | 2608 | if (IS_GEN9_LP(dev_priv)) { |
507e126e ID |
2609 | bxt_disable_dc9(dev_priv); |
2610 | bxt_display_core_init(dev_priv, true); | |
f62c79b3 ID |
2611 | if (dev_priv->csr.dmc_payload && |
2612 | (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) | |
2613 | gen9_enable_dc5(dev_priv); | |
507e126e | 2614 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
1a5df187 | 2615 | hsw_disable_pc8(dev_priv); |
507e126e | 2616 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
1a5df187 | 2617 | ret = vlv_resume_prepare(dev_priv, true); |
507e126e | 2618 | } |
1a5df187 | 2619 | |
0ab9cfeb ID |
2620 | /* |
2621 | * No point of rolling back things in case of an error, as the best | |
2622 | * we can do is to hope that things will still work (and disable RPM). | |
2623 | */ | |
c6be607a | 2624 | i915_gem_init_swizzling(dev_priv); |
83bf6d55 | 2625 | i915_gem_restore_fences(dev_priv); |
92b806d3 | 2626 | |
b963291c | 2627 | intel_runtime_pm_enable_interrupts(dev_priv); |
08d8a232 VS |
2628 | |
2629 | /* | |
2630 | * On VLV/CHV display interrupts are part of the display | |
2631 | * power well, so hpd is reinitialized from there. For | |
2632 | * everyone else do it here. | |
2633 | */ | |
666a4537 | 2634 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
08d8a232 VS |
2635 | intel_hpd_init(dev_priv); |
2636 | ||
2503a0fe KM |
2637 | intel_enable_ipc(dev_priv); |
2638 | ||
1f814dac ID |
2639 | enable_rpm_wakeref_asserts(dev_priv); |
2640 | ||
0ab9cfeb ID |
2641 | if (ret) |
2642 | DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); | |
2643 | else | |
2644 | DRM_DEBUG_KMS("Device resumed\n"); | |
2645 | ||
2646 | return ret; | |
8a187455 PZ |
2647 | } |
2648 | ||
42f5551d | 2649 | const struct dev_pm_ops i915_pm_ops = { |
5545dbbf ID |
2650 | /* |
2651 | * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, | |
2652 | * PMSG_RESUME] | |
2653 | */ | |
0206e353 | 2654 | .suspend = i915_pm_suspend, |
76c4b250 ID |
2655 | .suspend_late = i915_pm_suspend_late, |
2656 | .resume_early = i915_pm_resume_early, | |
0206e353 | 2657 | .resume = i915_pm_resume, |
5545dbbf ID |
2658 | |
2659 | /* | |
2660 | * S4 event handlers | |
2661 | * @freeze, @freeze_late : called (1) before creating the | |
2662 | * hibernation image [PMSG_FREEZE] and | |
2663 | * (2) after rebooting, before restoring | |
2664 | * the image [PMSG_QUIESCE] | |
2665 | * @thaw, @thaw_early : called (1) after creating the hibernation | |
2666 | * image, before writing it [PMSG_THAW] | |
2667 | * and (2) after failing to create or | |
2668 | * restore the image [PMSG_RECOVER] | |
2669 | * @poweroff, @poweroff_late: called after writing the hibernation | |
2670 | * image, before rebooting [PMSG_HIBERNATE] | |
2671 | * @restore, @restore_early : called after rebooting and restoring the | |
2672 | * hibernation image [PMSG_RESTORE] | |
2673 | */ | |
1f19ac2a CW |
2674 | .freeze = i915_pm_freeze, |
2675 | .freeze_late = i915_pm_freeze_late, | |
2676 | .thaw_early = i915_pm_thaw_early, | |
2677 | .thaw = i915_pm_thaw, | |
36d61e67 | 2678 | .poweroff = i915_pm_suspend, |
ab3be73f | 2679 | .poweroff_late = i915_pm_poweroff_late, |
1f19ac2a CW |
2680 | .restore_early = i915_pm_restore_early, |
2681 | .restore = i915_pm_restore, | |
5545dbbf ID |
2682 | |
2683 | /* S0ix (via runtime suspend) event handlers */ | |
97bea207 PZ |
2684 | .runtime_suspend = intel_runtime_suspend, |
2685 | .runtime_resume = intel_runtime_resume, | |
cbda12d7 ZW |
2686 | }; |
2687 | ||
78b68556 | 2688 | static const struct vm_operations_struct i915_gem_vm_ops = { |
de151cf6 | 2689 | .fault = i915_gem_fault, |
ab00b3e5 JB |
2690 | .open = drm_gem_vm_open, |
2691 | .close = drm_gem_vm_close, | |
de151cf6 JB |
2692 | }; |
2693 | ||
e08e96de AV |
2694 | static const struct file_operations i915_driver_fops = { |
2695 | .owner = THIS_MODULE, | |
2696 | .open = drm_open, | |
2697 | .release = drm_release, | |
2698 | .unlocked_ioctl = drm_ioctl, | |
2699 | .mmap = drm_gem_mmap, | |
2700 | .poll = drm_poll, | |
e08e96de | 2701 | .read = drm_read, |
e08e96de | 2702 | .compat_ioctl = i915_compat_ioctl, |
e08e96de AV |
2703 | .llseek = noop_llseek, |
2704 | }; | |
2705 | ||
0673ad47 CW |
2706 | static int |
2707 | i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data, | |
2708 | struct drm_file *file) | |
2709 | { | |
2710 | return -ENODEV; | |
2711 | } | |
2712 | ||
2713 | static const struct drm_ioctl_desc i915_ioctls[] = { | |
2714 | DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2715 | DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH), | |
2716 | DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH), | |
2717 | DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), | |
2718 | DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), | |
2719 | DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), | |
2720 | DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), | |
2721 | DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2722 | DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), | |
2723 | DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), | |
2724 | DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2725 | DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), | |
2726 | DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2727 | DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2728 | DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), | |
2729 | DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), | |
2730 | DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2731 | DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2732 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), | |
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
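	/* i915 perf interface: stream open plus OA metric-set management. */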
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};

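/*
 * Top-level description of the driver handed to the DRM core. The
 * feature bits advertise GEM, PRIME buffer sharing, render nodes,
 * atomic modesetting and sync objects.
 */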
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,

	.gem_close_object = i915_gem_close_object,
	.gem_free_object_unlocked = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
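
/*
 * The mock DRM helpers for the i915 self-tests are pulled straight into
 * this translation unit, and built only when CONFIG_DRM_I915_SELFTEST
 * is enabled.
 */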
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_drm.c"
#endif