/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"

static struct drm_driver driver;

static unsigned int i915_load_fail_count;

bool __i915_inject_load_failure(const char *func, int line)
{
	if (i915_load_fail_count >= i915.inject_load_failure)
		return false;

	if (++i915_load_fail_count == i915.inject_load_failure) {
		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
			 i915.inject_load_failure, func, line);
		return true;
	}

	return false;
}
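
/*
 * Note: callers below invoke this through the i915_inject_load_failure()
 * wrapper, which supplies __func__ and __LINE__; the checkpoint at which a
 * failure is injected is selected with the i915.inject_load_failure module
 * parameter.
 */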

#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
		    "providing the dmesg log by booting with drm.debug=0xf"

void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...)
{
	static bool shown_bug_once;
	struct device *kdev = dev_priv->drm.dev;
	bool is_error = level[1] <= KERN_ERR[1];
	bool is_debug = level[1] == KERN_DEBUG[1];
	struct va_format vaf;
	va_list args;

	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
		   __builtin_return_address(0), &vaf);

	if (is_error && !shown_bug_once) {
		dev_notice(kdev, "%s", FDO_BUG_MSG);
		shown_bug_once = true;
	}

	va_end(args);
}

static bool i915_error_injected(struct drm_i915_private *dev_priv)
{
	return i915.inject_load_failure &&
	       i915_load_fail_count == i915.inject_load_failure;
}

#define i915_load_error(dev_priv, fmt, ...)				      \
	__i915_printk(dev_priv,						      \
		      i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
		      fmt, ##__VA_ARGS__)

static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN5(dev_priv)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		ret = PCH_LPT;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	}

	return ret;
}

static void intel_detect_pch(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough easy for the VMM, which then only
	 * needs to expose an ISA bridge to let the driver know the real
	 * hardware underneath. This is a requirement from the virtualization
	 * team.
	 *
	 * In some virtualized environments (e.g. XEN), there is an irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev_priv));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev_priv) ||
					  IS_IVYBRIDGE(dev_priv)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev_priv) ||
					  IS_IVYBRIDGE(dev_priv)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(IS_HSW_ULT(dev_priv) ||
					IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(!IS_HSW_ULT(dev_priv) &&
					!IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev_priv) &&
					!IS_KABYLAKE(dev_priv));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev_priv) &&
					!IS_KABYLAKE(dev_priv));
			} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_KBP;
				DRM_DEBUG_KMS("Found KabyPoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev_priv) &&
					!IS_KABYLAKE(dev_priv));
			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
				   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
				    pch->subsystem_vendor ==
					    PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
				    pch->subsystem_device ==
					    PCI_SUBDEVICE_ID_QEMU)) {
				dev_priv->pch_type =
					intel_virt_detect_pch(dev_priv);
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}

static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = pdev->revision;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_BSD:
		value = !!dev_priv->engine[VCS];
		break;
	case I915_PARAM_HAS_BLT:
		value = !!dev_priv->engine[BCS];
		break;
	case I915_PARAM_HAS_VEBOX:
		value = !!dev_priv->engine[VECS];
		break;
	case I915_PARAM_HAS_BSD2:
		value = !!dev_priv->engine[VCS2];
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev_priv);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev_priv);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev_priv);
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915.semaphores;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version(dev_priv);
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev_priv)->sseu.eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev_priv);
		break;
	case I915_PARAM_HAS_POOLED_EU:
		value = HAS_POOLED_EU(dev_priv);
		break;
	case I915_PARAM_MIN_EU_IN_POOL:
		value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
		break;
	case I915_PARAM_HUC_STATUS:
		intel_runtime_pm_get(dev_priv);
		value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
		intel_runtime_pm_put(dev_priv);
		break;
	case I915_PARAM_MMAP_GTT_VERSION:
		/* Though we've started our numbering from 1, and so class all
		 * earlier versions as 0, in effect their value is undefined as
		 * the ioctl will report EINVAL for the unknown param!
		 */
		value = i915_gem_mmap_gtt_version();
		break;
	case I915_PARAM_HAS_SCHEDULER:
		value = dev_priv->engine[RCS] &&
			dev_priv->engine[RCS]->schedule;
		break;
	case I915_PARAM_MMAP_VERSION:
		/* Remember to bump this if the version changes! */
	case I915_PARAM_HAS_GEM:
	case I915_PARAM_HAS_PAGEFLIPPING:
	case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
	case I915_PARAM_HAS_RELAXED_FENCING:
	case I915_PARAM_HAS_COHERENT_RINGS:
	case I915_PARAM_HAS_RELAXED_DELTA:
	case I915_PARAM_HAS_GEN7_SOL_RESET:
	case I915_PARAM_HAS_WAIT_TIMEOUT:
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
	case I915_PARAM_HAS_PINNED_BATCHES:
	case I915_PARAM_HAS_EXEC_NO_RELOC:
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
	case I915_PARAM_HAS_EXEC_SOFTPIN:
	case I915_PARAM_HAS_EXEC_ASYNC:
	case I915_PARAM_HAS_EXEC_FENCE:
	case I915_PARAM_HAS_EXEC_CAPTURE:
		/* For the time being all of these are always true;
		 * if some supported hardware does not have one of these
		 * features this value needs to be provided from
		 * INTEL_INFO(), a feature macro, or similar.
		 */
		value = 1;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (put_user(value, param->value))
		return -EFAULT;

	return 0;
}
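
/*
 * Illustrative sketch (not part of this file): userspace reads these values
 * through the DRM_IOCTL_I915_GETPARAM ioctl, roughly as
 *
 *	int value = 0;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_CHIPSET_ID,
 *				   .value = &value };
 *	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 *
 * where fd is an open DRM device node.
 */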

static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

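/*
 * MCHBAR is the Memory Controller Hub's register window. The helpers below
 * make sure it is mapped and enabled (allocating PCI resource space for it
 * if the BIOS/ACPI did not) so that later code can safely poke at the
 * memory-controller registers behind it, and disable it again on teardown.
 */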
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_GEN(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible; remember whether we have to disable it again */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_i915_private *dev_priv = cookie;

	intel_modeset_vga_set_state(dev_priv, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
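
/*
 * The function above is the VGA-arbiter decode callback handed to
 * vga_client_register() in i915_load_modeset_init() below; the cookie passed
 * back to it is the drm_i915_private pointer we registered with.
 */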

static int i915_resume_switcheroo(struct drm_device *dev);
static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

static void i915_gem_fini(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uc_fini_hw(dev_priv);
	i915_gem_cleanup_engines(dev_priv);
	i915_gem_context_fini(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	i915_gem_drain_freed_objects(dev_priv);

	WARN_ON(!list_empty(&dev_priv->context_list));
}

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_bios_init(dev_priv);

	/* If we have more than one VGA card, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(dev_priv);

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev_priv);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	ret = intel_modeset_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_uc_init_fw(dev_priv);

	ret = i915_gem_init(dev_priv);
	if (ret)
		goto cleanup_uc;

	intel_modeset_gem_init(dev);

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	if (i915_gem_suspend(dev_priv))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
	i915_gem_fini(dev_priv);
cleanup_uc:
	intel_uc_fini_fw(dev_priv);
cleanup_irq:
	drm_irq_uninstall(dev);
	intel_teardown_gmbus(dev_priv);
cleanup_csr:
	intel_csr_ucode_fini(dev_priv);
	intel_power_domains_fini(dev_priv);
	vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
	vga_client_register(pdev, NULL, NULL, NULL);
out:
	return ret;
}

static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = ggtt->mappable_base;
	ap->ranges[0].size = ggtt->mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif
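
/*
 * Summary of the variants above: without CONFIG_VGA_CONSOLE there is nothing
 * to kick out; with a VGA console but no dummy console to take over we have
 * to fail; otherwise we hand the console over to dummy_con and unregister
 * vga_con.
 */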

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_engines_cleanup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		kfree(engine);
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);

	if (pre) {
		DRM_ERROR("This is a pre-production stepping. "
			  "It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

/**
 * i915_driver_init_early - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_init_early(struct drm_i915_private *dev_priv,
				  const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	int ret = 0;

	if (i915_inject_load_failure())
		return -ENODEV;

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(dev_priv);
	memcpy(device_info, match_info, sizeof(*device_info));
	device_info->device_id = dev_priv->drm.pdev->device;

	BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
	device_info->gen_mask = BIT(device_info->gen - 1);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);

	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->modeset_restore_lock);
	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);

	intel_uc_init_early(dev_priv);
	i915_memcpy_init_early(dev_priv);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		goto err_engines;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_pm_setup(dev_priv);
	intel_init_dpio(dev_priv);
	intel_power_domains_init(dev_priv);
	intel_irq_init(dev_priv);
	intel_hangcheck_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	ret = i915_gem_load_init(dev_priv);
	if (ret < 0)
		goto err_irq;

	intel_display_crc_init(dev_priv);

	intel_device_info_dump(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	i915_perf_init(dev_priv);

	return 0;

err_irq:
	intel_irq_fini(dev_priv);
	i915_workqueues_cleanup(dev_priv);
err_engines:
	i915_engines_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_perf_fini(dev_priv);
	i915_gem_load_cleanup(dev_priv);
	intel_irq_fini(dev_priv);
	i915_workqueues_cleanup(dev_priv);
	i915_engines_cleanup(dev_priv);
}

static int i915_mmio_setup(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want to map with ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (dev_priv->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);

	return 0;
}

static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	intel_teardown_mchbar(dev_priv);
	pci_iounmap(pdev, dev_priv->regs);
}

/**
 * i915_driver_init_mmio - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = i915_mmio_setup(dev_priv);
	if (ret < 0)
		goto err_bridge;

	intel_uncore_init(dev_priv);

	ret = intel_engines_init_mmio(dev_priv);
	if (ret)
		goto err_uncore;

	i915_gem_init_mmio(dev_priv);

	return 0;

err_uncore:
	intel_uncore_fini(dev_priv);
err_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
	intel_uncore_fini(dev_priv);
	i915_mmio_cleanup(dev_priv);
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	i915.enable_execlists =
		intel_sanitize_enable_execlists(dev_priv,
						i915.enable_execlists);

	/*
	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
	 * user's requested state against the hardware/driver capabilities. We
	 * do this now so that we can print out any log messages once rather
	 * than every time we check intel_enable_ppgtt().
	 */
	i915.enable_ppgtt =
		intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);

	i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
	DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));

	intel_uc_sanitize_options(dev_priv);

	intel_gvt_sanitize_options(dev_priv);
}

/**
 * i915_driver_init_hw - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_device_info_runtime_init(dev_priv);

	intel_sanitize_options(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		return ret;

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto out_ggtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_ggtt;
	}

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		return ret;

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		DRM_ERROR("failed to enable GGTT\n");
		goto out_ggtt;
	}

	pci_set_master(pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_uncore_sanitize(dev_priv);

	intel_opregion_setup(dev_priv);

	i915_gem_load_init_fences(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) {
		if (pci_enable_msi(pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto out_ggtt;

	return 0;

out_ggtt:
	i915_ggtt_cleanup_hw(dev_priv);

	return ret;
}

/**
 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);

	pm_qos_remove_request(&dev_priv->pm_qos);
	i915_ggtt_cleanup_hw(dev_priv);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_shrinker_init(dev_priv);

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev_priv))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		i915_guc_log_register(dev_priv);
		i915_setup_sysfs(dev_priv);

		/* Depends on sysfs having been initialized */
		i915_perf_register(dev_priv);
	} else
		DRM_ERROR("Failed to register driver for userspace access!\n");

	if (INTEL_INFO(dev_priv)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	if (IS_GEN5(dev_priv))
		intel_gpu_ips_init(dev_priv);

	intel_audio_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45. Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	intel_audio_deinit(dev_priv);

	intel_gpu_ips_teardown();
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	i915_perf_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	i915_guc_log_unregister(dev_priv);
	drm_dev_unregister(&dev_priv->drm);

	i915_gem_shrinker_cleanup(dev_priv);
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver load routine has to do several things:
 * - drive output discovery via intel_modeset_init()
 * - initialize the memory manager
 * - allocate initial config memory
 * - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct drm_i915_private *dev_priv;
	int ret;

	/* Enable nuclear pageflip on ILK+ */
	if (!i915.nuclear_pageflip && match_info->gen < 5)
		driver.driver_features &= ~DRIVER_ATOMIC;

	ret = -ENOMEM;
	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv)
		ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "allocation failed\n");
		goto out_free;
	}

	dev_priv->drm.pdev = pdev;
	dev_priv->drm.dev_private = dev_priv;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;

	pci_set_drvdata(pdev, &dev_priv->drm);
	/*
	 * Disable the system suspend direct complete optimization, which can
	 * leave the device suspended skipping the driver's suspend handlers
	 * if the device was already runtime suspended. This is needed due to
	 * the difference in our runtime and system suspend sequence and
	 * because the HDA driver may require us to enable the audio power
	 * domain during system suspend.
	 */
	pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;

	ret = i915_driver_init_early(dev_priv, ent);
	if (ret < 0)
		goto out_pci_disable;

	intel_runtime_pm_get(dev_priv);

	ret = i915_driver_init_mmio(dev_priv);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_init_hw(dev_priv);
	if (ret < 0)
		goto out_cleanup_mmio;

	/*
	 * TODO: move the vblank init and parts of modeset init steps into one
	 * of the i915_driver_init_/i915_driver_register functions according
	 * to the role/effect of the given init step.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes) {
		ret = drm_vblank_init(&dev_priv->drm,
				      INTEL_INFO(dev_priv)->num_pipes);
		if (ret)
			goto out_cleanup_hw;
	}

	ret = i915_load_modeset_init(&dev_priv->drm);
	if (ret < 0)
		goto out_cleanup_vblank;

	i915_driver_register(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	dev_priv->ipc_enabled = false;

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		DRM_INFO("DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");

	intel_runtime_pm_put(dev_priv);

	return 0;

out_cleanup_vblank:
	drm_vblank_cleanup(&dev_priv->drm);
out_cleanup_hw:
	i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
	i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
	intel_runtime_pm_put(dev_priv);
	i915_driver_cleanup_early(dev_priv);
out_pci_disable:
	pci_disable_device(pdev);
out_fini:
	i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
	drm_dev_fini(&dev_priv->drm);
out_free:
	kfree(dev_priv);
	return ret;
}

void i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	intel_fbdev_fini(dev);

	if (i915_gem_suspend(dev_priv))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");

	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	drm_atomic_helper_shutdown(dev);

	intel_gvt_cleanup(dev_priv);

	i915_driver_unregister(dev_priv);

	drm_vblank_cleanup(dev);

	intel_modeset_cleanup(dev);

	/*
	 * free the memory space allocated for the child device
	 * config parsed from VBT
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}
	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

	vga_switcheroo_unregister_client(pdev);
	vga_client_register(pdev, NULL, NULL, NULL);

	intel_csr_ucode_fini(dev_priv);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_reset_error_state(dev_priv);

	/* Flush any outstanding unpin_work. */
	drain_workqueue(dev_priv->wq);

	i915_gem_fini(dev_priv);
	intel_uc_fini_fw(dev_priv);
	intel_fbc_cleanup_cfb(dev_priv);

	intel_power_domains_fini(dev_priv);

	i915_driver_cleanup_hw(dev_priv);
	i915_driver_cleanup_mmio(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
}

static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i915_driver_cleanup_early(dev_priv);
	drm_dev_fini(&dev_priv->drm);

	kfree(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;

	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);

	kfree(file_priv);
}

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

1a5df187 PZ |
1442 | static int vlv_resume_prepare(struct drm_i915_private *dev_priv, |
1443 | bool rpm_resume); | |
507e126e | 1444 | static int vlv_suspend_complete(struct drm_i915_private *dev_priv); |
f75a1985 | 1445 | |
bc87229f ID |
1446 | static bool suspend_to_idle(struct drm_i915_private *dev_priv) |
1447 | { | |
1448 | #if IS_ENABLED(CONFIG_ACPI_SLEEP) | |
1449 | if (acpi_target_system_state() < ACPI_STATE_S3) | |
1450 | return true; | |
1451 | #endif | |
1452 | return false; | |
1453 | } | |
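/*
 * suspend_to_idle() distinguishes suspend-to-idle (ACPI target state
 * shallower than S3) from a full S3 suspend. The callers below use it to
 * pick the opregion target state (PCI_D1 vs. PCI_D3cold in
 * i915_drm_suspend()) and to decide whether the CSR/DMC firmware can be
 * left in charge of context save/restore (fw_csr in i915_drm_suspend_late()).
 */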
ebc32824 | 1454 | |
5e365c39 | 1455 | static int i915_drm_suspend(struct drm_device *dev) |
ba8bbcf6 | 1456 | { |
fac5e23e | 1457 | struct drm_i915_private *dev_priv = to_i915(dev); |
52a05c30 | 1458 | struct pci_dev *pdev = dev_priv->drm.pdev; |
e5747e3a | 1459 | pci_power_t opregion_target_state; |
d5818938 | 1460 | int error; |
61caf87c | 1461 | |
b8efb17b ZR |
1462 | /* ignore lid events during suspend */ |
1463 | mutex_lock(&dev_priv->modeset_restore_lock); | |
1464 | dev_priv->modeset_restore = MODESET_SUSPENDED; | |
1465 | mutex_unlock(&dev_priv->modeset_restore_lock); | |
1466 | ||
1f814dac ID |
1467 | disable_rpm_wakeref_asserts(dev_priv); |
1468 | ||
c67a470b PZ |
1469 | /* We do a lot of poking in a lot of registers; make sure they work
1470 | * properly. */ | |
da7e29bd | 1471 | intel_display_set_init_power(dev_priv, true); |
cb10799c | 1472 | |
5bcf719b DA |
1473 | drm_kms_helper_poll_disable(dev); |
1474 | ||
52a05c30 | 1475 | pci_save_state(pdev); |
ba8bbcf6 | 1476 | |
bf9e8429 | 1477 | error = i915_gem_suspend(dev_priv); |
d5818938 | 1478 | if (error) { |
52a05c30 | 1479 | dev_err(&pdev->dev, |
d5818938 | 1480 | "GEM idle failed, resume might fail\n"); |
1f814dac | 1481 | goto out; |
d5818938 | 1482 | } |
db1b76ca | 1483 | |
6b72d486 | 1484 | intel_display_suspend(dev); |
2eb5252e | 1485 | |
d5818938 | 1486 | intel_dp_mst_suspend(dev); |
7d708ee4 | 1487 | |
d5818938 DV |
1488 | intel_runtime_pm_disable_interrupts(dev_priv); |
1489 | intel_hpd_cancel_work(dev_priv); | |
09b64267 | 1490 | |
d5818938 | 1491 | intel_suspend_encoders(dev_priv); |
0e32b39c | 1492 | |
712bf364 | 1493 | intel_suspend_hw(dev_priv); |
5669fcac | 1494 | |
275a991c | 1495 | i915_gem_suspend_gtt_mappings(dev_priv); |
828c7908 | 1496 | |
af6dc742 | 1497 | i915_save_state(dev_priv); |
9e06dd39 | 1498 | |
bc87229f | 1499 | opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; |
6f9f4b7a | 1500 | intel_opregion_notify_adapter(dev_priv, opregion_target_state); |
e5747e3a | 1501 | |
68f60946 | 1502 | intel_uncore_suspend(dev_priv); |
03d92e47 | 1503 | intel_opregion_unregister(dev_priv); |
8ee1c3db | 1504 | |
82e3b8c1 | 1505 | intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); |
3fa016a0 | 1506 | |
62d5d69b MK |
1507 | dev_priv->suspend_count++; |
1508 | ||
f74ed08d | 1509 | intel_csr_ucode_suspend(dev_priv); |
f514c2d8 | 1510 | |
1f814dac ID |
1511 | out: |
1512 | enable_rpm_wakeref_asserts(dev_priv); | |
1513 | ||
1514 | return error; | |
84b79f8d RW |
1515 | } |
1516 | ||
c49d13ee | 1517 | static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) |
c3c09c95 | 1518 | { |
c49d13ee | 1519 | struct drm_i915_private *dev_priv = to_i915(dev); |
52a05c30 | 1520 | struct pci_dev *pdev = dev_priv->drm.pdev; |
bc87229f | 1521 | bool fw_csr; |
c3c09c95 ID |
1522 | int ret; |
1523 | ||
1f814dac ID |
1524 | disable_rpm_wakeref_asserts(dev_priv); |
1525 | ||
4c494a57 ID |
1526 | intel_display_set_init_power(dev_priv, false); |
1527 | ||
b9fd799e | 1528 | fw_csr = !IS_GEN9_LP(dev_priv) && |
a7c8125f | 1529 | suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; |
bc87229f ID |
1530 | /* |
1531 | * In case of firmware assisted context save/restore don't manually | |
1532 | * deinit the power domains. This also means the CSR/DMC firmware will | |
1533 | * stay active, it will power down any HW resources as required and | |
1534 | * also enable deeper system power states that would be blocked if the | |
1535 | * firmware was inactive. | |
1536 | */ | |
1537 | if (!fw_csr) | |
1538 | intel_power_domains_suspend(dev_priv); | |
73dfc227 | 1539 | |
507e126e | 1540 | ret = 0; |
b9fd799e | 1541 | if (IS_GEN9_LP(dev_priv)) |
507e126e | 1542 | bxt_enable_dc9(dev_priv); |
b8aea3d1 | 1543 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) |
507e126e ID |
1544 | hsw_enable_pc8(dev_priv); |
1545 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | |
1546 | ret = vlv_suspend_complete(dev_priv); | |
c3c09c95 ID |
1547 | |
1548 | if (ret) { | |
1549 | DRM_ERROR("Suspend complete failed: %d\n", ret); | |
bc87229f ID |
1550 | if (!fw_csr) |
1551 | intel_power_domains_init_hw(dev_priv, true); | |
c3c09c95 | 1552 | |
1f814dac | 1553 | goto out; |
c3c09c95 ID |
1554 | } |
1555 | ||
52a05c30 | 1556 | pci_disable_device(pdev); |
ab3be73f | 1557 | /* |
54875571 | 1558 | * During hibernation on some platforms the BIOS may try to access |
ab3be73f ID |
1559 | * the device even though it's already in D3 and hang the machine. So |
1560 | * leave the device in D0 on those platforms and hope the BIOS will | |
54875571 ID |
1561 | * power down the device properly. The issue was seen on multiple old |
1562 | * GENs with different BIOS vendors, so having an explicit blacklist | |
1563 | * is impractical; apply the workaround on everything pre GEN6. The
1564 | * platforms where the issue was seen: | |
1565 | * Lenovo Thinkpad X301, X61s, X60, T60, X41 | |
1566 | * Fujitsu FSC S7110 | |
1567 | * Acer Aspire 1830T | |
ab3be73f | 1568 | */ |
514e1d64 | 1569 | if (!(hibernation && INTEL_GEN(dev_priv) < 6)) |
52a05c30 | 1570 | pci_set_power_state(pdev, PCI_D3hot); |
c3c09c95 | 1571 | |
bc87229f ID |
1572 | dev_priv->suspended_to_idle = suspend_to_idle(dev_priv); |
1573 | ||
1f814dac ID |
1574 | out: |
1575 | enable_rpm_wakeref_asserts(dev_priv); | |
1576 | ||
1577 | return ret; | |
c3c09c95 ID |
1578 | } |
1579 | ||
a9a251c2 | 1580 | static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) |
84b79f8d RW |
1581 | { |
1582 | int error; | |
1583 | ||
ded8b07d | 1584 | if (!dev) { |
84b79f8d RW |
1585 | DRM_ERROR("dev: %p\n", dev); |
1586 | DRM_ERROR("DRM not initialized, aborting suspend.\n"); | |
1587 | return -ENODEV; | |
1588 | } | |
1589 | ||
0b14cbd2 ID |
1590 | if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND && |
1591 | state.event != PM_EVENT_FREEZE)) | |
1592 | return -EINVAL; | |
5bcf719b DA |
1593 | |
1594 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | |
1595 | return 0; | |
6eecba33 | 1596 | |
5e365c39 | 1597 | error = i915_drm_suspend(dev); |
84b79f8d RW |
1598 | if (error) |
1599 | return error; | |
1600 | ||
ab3be73f | 1601 | return i915_drm_suspend_late(dev, false); |
ba8bbcf6 JB |
1602 | } |
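/*
 * i915_suspend_switcheroo() above and i915_resume_switcheroo() below are the
 * entry points used when the GPU is powered down or up by the vga_switcheroo
 * framework; they are presumably referenced from the driver's switcheroo
 * client ops outside this excerpt, as opposed to the i915_pm_* handlers
 * further down, which are invoked by the PM core.
 */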
1603 | ||
5e365c39 | 1604 | static int i915_drm_resume(struct drm_device *dev) |
76c4b250 | 1605 | { |
fac5e23e | 1606 | struct drm_i915_private *dev_priv = to_i915(dev); |
ac840ae5 | 1607 | int ret; |
9d49c0ef | 1608 | |
1f814dac | 1609 | disable_rpm_wakeref_asserts(dev_priv); |
abc80abd | 1610 | intel_sanitize_gt_powersave(dev_priv); |
1f814dac | 1611 | |
97d6d7ab | 1612 | ret = i915_ggtt_enable_hw(dev_priv); |
ac840ae5 VS |
1613 | if (ret) |
1614 | DRM_ERROR("failed to re-enable GGTT\n"); | |
1615 | ||
f74ed08d ID |
1616 | intel_csr_ucode_resume(dev_priv); |
1617 | ||
bf9e8429 | 1618 | i915_gem_resume(dev_priv); |
9d49c0ef | 1619 | |
af6dc742 | 1620 | i915_restore_state(dev_priv); |
8090ba8c | 1621 | intel_pps_unlock_regs_wa(dev_priv); |
6f9f4b7a | 1622 | intel_opregion_setup(dev_priv); |
61caf87c | 1623 | |
c39055b0 | 1624 | intel_init_pch_refclk(dev_priv); |
1833b134 | 1625 | |
364aece0 PA |
1626 | /* |
1627 | * Interrupts have to be enabled before any batches are run. If not, the
1628 | * GPU will hang. i915_gem_init_hw() will initiate batches to | |
1629 | * update/restore the context. | |
1630 | * | |
908764f6 ID |
1631 | * drm_mode_config_reset() needs AUX interrupts. |
1632 | * | |
364aece0 PA |
1633 | * Modeset enabling in intel_modeset_init_hw() also needs working |
1634 | * interrupts. | |
1635 | */ | |
1636 | intel_runtime_pm_enable_interrupts(dev_priv); | |
1637 | ||
908764f6 ID |
1638 | drm_mode_config_reset(dev); |
1639 | ||
d5818938 | 1640 | mutex_lock(&dev->struct_mutex); |
bf9e8429 | 1641 | if (i915_gem_init_hw(dev_priv)) { |
d5818938 | 1642 | DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); |
821ed7df | 1643 | i915_gem_set_wedged(dev_priv); |
d5818938 DV |
1644 | } |
1645 | mutex_unlock(&dev->struct_mutex); | |
226485e9 | 1646 | |
bf9e8429 | 1647 | intel_guc_resume(dev_priv); |
a1c41994 | 1648 | |
d5818938 | 1649 | intel_modeset_init_hw(dev); |
24576d23 | 1650 | |
d5818938 DV |
1651 | spin_lock_irq(&dev_priv->irq_lock); |
1652 | if (dev_priv->display.hpd_irq_setup) | |
91d14251 | 1653 | dev_priv->display.hpd_irq_setup(dev_priv); |
d5818938 | 1654 | spin_unlock_irq(&dev_priv->irq_lock); |
0e32b39c | 1655 | |
d5818938 | 1656 | intel_dp_mst_resume(dev); |
e7d6f7d7 | 1657 | |
a16b7658 L |
1658 | intel_display_resume(dev); |
1659 | ||
e0b70061 L |
1660 | drm_kms_helper_poll_enable(dev); |
1661 | ||
d5818938 DV |
1662 | /* |
1663 | * ... but also need to make sure that hotplug processing | |
1664 | * doesn't cause havoc. Like in the driver load code we don't | |
1665 | * bother with the tiny race here where we might lose hotplug
1666 | * notifications. | |
1667 | */
1668 | intel_hpd_init(dev_priv); | |
1daed3fb | 1669 | |
03d92e47 | 1670 | intel_opregion_register(dev_priv); |
44834a67 | 1671 | |
82e3b8c1 | 1672 | intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); |
073f34d9 | 1673 | |
b8efb17b ZR |
1674 | mutex_lock(&dev_priv->modeset_restore_lock); |
1675 | dev_priv->modeset_restore = MODESET_DONE; | |
1676 | mutex_unlock(&dev_priv->modeset_restore_lock); | |
8a187455 | 1677 | |
6f9f4b7a | 1678 | intel_opregion_notify_adapter(dev_priv, PCI_D0); |
e5747e3a | 1679 | |
54b4f68f | 1680 | intel_autoenable_gt_powersave(dev_priv); |
ee6f280e | 1681 | |
1f814dac ID |
1682 | enable_rpm_wakeref_asserts(dev_priv); |
1683 | ||
074c6ada | 1684 | return 0; |
84b79f8d RW |
1685 | } |
1686 | ||
5e365c39 | 1687 | static int i915_drm_resume_early(struct drm_device *dev) |
84b79f8d | 1688 | { |
fac5e23e | 1689 | struct drm_i915_private *dev_priv = to_i915(dev); |
52a05c30 | 1690 | struct pci_dev *pdev = dev_priv->drm.pdev; |
44410cd0 | 1691 | int ret; |
36d61e67 | 1692 | |
76c4b250 ID |
1693 | /* |
1694 | * We have a resume ordering issue with the snd-hda driver also | |
1695 | * requiring our device to be powered up. Due to the lack of a
1696 | * parent/child relationship we currently solve this with an early | |
1697 | * resume hook. | |
1698 | * | |
1699 | * FIXME: This should be solved with a special hdmi sink device or | |
1700 | * similar so that power domains can be employed. | |
1701 | */ | |
44410cd0 ID |
1702 | |
1703 | /* | |
1704 | * Note that we need to set the power state explicitly, since we | |
1705 | * powered off the device during freeze and the PCI core won't power | |
1706 | * it back up for us during thaw. Powering off the device during | |
1707 | * freeze is not a hard requirement though, and during the | |
1708 | * suspend/resume phases the PCI core makes sure we get here with the | |
1709 | * device powered on. So in case we change our freeze logic and keep | |
1710 | * the device powered we can also remove the following set power state | |
1711 | * call. | |
1712 | */ | |
52a05c30 | 1713 | ret = pci_set_power_state(pdev, PCI_D0); |
44410cd0 ID |
1714 | if (ret) { |
1715 | DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret); | |
1716 | goto out; | |
1717 | } | |
1718 | ||
1719 | /* | |
1720 | * Note that pci_enable_device() first enables any parent bridge | |
1721 | * device and only then sets the power state for this device. The | |
1722 | * bridge enabling is a nop though, since bridge devices are resumed | |
1723 | * first. The order of enabling power and enabling the device is | |
1724 | * imposed by the PCI core as described above, so here we preserve the | |
1725 | * same order for the freeze/thaw phases. | |
1726 | * | |
1727 | * TODO: eventually we should remove pci_disable_device() / | |
1728 | * pci_enable_device() from suspend/resume. Due to how they
1729 | * depend on the device enable refcount we can't anyway depend on them | |
1730 | * disabling/enabling the device. | |
1731 | */ | |
52a05c30 | 1732 | if (pci_enable_device(pdev)) { |
bc87229f ID |
1733 | ret = -EIO; |
1734 | goto out; | |
1735 | } | |
84b79f8d | 1736 | |
52a05c30 | 1737 | pci_set_master(pdev); |
84b79f8d | 1738 | |
1f814dac ID |
1739 | disable_rpm_wakeref_asserts(dev_priv); |
1740 | ||
666a4537 | 1741 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
1a5df187 | 1742 | ret = vlv_resume_prepare(dev_priv, false); |
36d61e67 | 1743 | if (ret) |
ff0b187f DL |
1744 | DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", |
1745 | ret); | |
36d61e67 | 1746 | |
68f60946 | 1747 | intel_uncore_resume_early(dev_priv); |
efee833a | 1748 | |
b9fd799e | 1749 | if (IS_GEN9_LP(dev_priv)) { |
da2f41d1 ID |
1750 | if (!dev_priv->suspended_to_idle) |
1751 | gen9_sanitize_dc_state(dev_priv); | |
507e126e | 1752 | bxt_disable_dc9(dev_priv); |
da2f41d1 | 1753 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
a9a6b73a | 1754 | hsw_disable_pc8(dev_priv); |
da2f41d1 | 1755 | } |
efee833a | 1756 | |
dc97997a | 1757 | intel_uncore_sanitize(dev_priv); |
bc87229f | 1758 | |
b9fd799e | 1759 | if (IS_GEN9_LP(dev_priv) || |
a7c8125f | 1760 | !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) |
bc87229f ID |
1761 | intel_power_domains_init_hw(dev_priv, true); |
1762 | ||
24145517 CW |
1763 | i915_gem_sanitize(dev_priv); |
1764 | ||
6e35e8ab ID |
1765 | enable_rpm_wakeref_asserts(dev_priv); |
1766 | ||
bc87229f ID |
1767 | out: |
1768 | dev_priv->suspended_to_idle = false; | |
36d61e67 ID |
1769 | |
1770 | return ret; | |
76c4b250 ID |
1771 | } |
1772 | ||
7f26cb88 | 1773 | static int i915_resume_switcheroo(struct drm_device *dev) |
76c4b250 | 1774 | { |
50a0072f | 1775 | int ret; |
76c4b250 | 1776 | |
097dd837 ID |
1777 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
1778 | return 0; | |
1779 | ||
5e365c39 | 1780 | ret = i915_drm_resume_early(dev); |
50a0072f ID |
1781 | if (ret) |
1782 | return ret; | |
1783 | ||
5a17514e ID |
1784 | return i915_drm_resume(dev); |
1785 | } | |
1786 | ||
11ed50ec | 1787 | /** |
f3953dcb | 1788 | * i915_reset - reset chip after a hang |
df210574 | 1789 | * @dev_priv: device private to reset |
11ed50ec | 1790 | * |
780f262a CW |
1791 | * Reset the chip. Useful if a hang is detected. Marks the device as wedged |
1792 | * on failure. | |
11ed50ec | 1793 | * |
221fe799 CW |
1794 | * Caller must hold the struct_mutex. |
1795 | * | |
11ed50ec BG |
1796 | * Procedure is fairly simple: |
1797 | * - reset the chip using the reset reg | |
1798 | * - re-init context state | |
1799 | * - re-init hardware status page | |
1800 | * - re-init ring buffer | |
1801 | * - re-init interrupt state | |
1802 | * - re-init display | |
1803 | */ | |
780f262a | 1804 | void i915_reset(struct drm_i915_private *dev_priv) |
11ed50ec | 1805 | { |
d98c52cf | 1806 | struct i915_gpu_error *error = &dev_priv->gpu_error; |
0573ed4a | 1807 | int ret; |
11ed50ec | 1808 | |
bf9e8429 | 1809 | lockdep_assert_held(&dev_priv->drm.struct_mutex); |
8c185eca | 1810 | GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags)); |
221fe799 | 1811 | |
8c185eca | 1812 | if (!test_bit(I915_RESET_HANDOFF, &error->flags)) |
780f262a | 1813 | return; |
11ed50ec | 1814 | |
d98c52cf | 1815 | /* Clear any previous failed attempts at recovery. Time to try again. */ |
2e8f9d32 CW |
1816 | if (!i915_gem_unset_wedged(dev_priv)) |
1817 | goto wakeup; | |
1818 | ||
8af29b0c | 1819 | error->reset_count++; |
d98c52cf | 1820 | |
7b4d3a16 | 1821 | pr_notice("drm/i915: Resetting chip after gpu hang\n"); |
4c965543 | 1822 | disable_irq(dev_priv->drm.irq); |
0e178aef CW |
1823 | ret = i915_gem_reset_prepare(dev_priv); |
1824 | if (ret) { | |
1825 | DRM_ERROR("GPU recovery failed\n"); | |
1826 | intel_gpu_reset(dev_priv, ALL_ENGINES); | |
1827 | goto error; | |
1828 | } | |
9e60ab03 | 1829 | |
dc97997a | 1830 | ret = intel_gpu_reset(dev_priv, ALL_ENGINES); |
0573ed4a | 1831 | if (ret) { |
804e59a8 CW |
1832 | if (ret != -ENODEV) |
1833 | DRM_ERROR("Failed to reset chip: %i\n", ret); | |
1834 | else | |
1835 | DRM_DEBUG_DRIVER("GPU reset disabled\n"); | |
d98c52cf | 1836 | goto error; |
11ed50ec BG |
1837 | } |
1838 | ||
d8027093 | 1839 | i915_gem_reset(dev_priv); |
1362b776 VS |
1840 | intel_overlay_reset(dev_priv); |
1841 | ||
11ed50ec BG |
1842 | /* Ok, now get things going again... */ |
1843 | ||
1844 | /* | |
1845 | * Everything depends on having the GTT running, so we need to start | |
1846 | * there. Fortunately we don't need to do this unless we reset the | |
1847 | * chip at a PCI level. | |
1848 | * | |
1849 | * Next we need to restore the context, but we don't use those | |
1850 | * yet either... | |
1851 | * | |
1852 | * Ring buffer needs to be re-initialized in the KMS case, or if X | |
1853 | * was running at the time of the reset (i.e. we weren't VT | |
1854 | * switched away). | |
1855 | */ | |
bf9e8429 | 1856 | ret = i915_gem_init_hw(dev_priv); |
33d30a9c DV |
1857 | if (ret) { |
1858 | DRM_ERROR("Failed hw init on reset %d\n", ret); | |
d98c52cf | 1859 | goto error; |
11ed50ec BG |
1860 | } |
1861 | ||
c2a126a4 CW |
1862 | i915_queue_hangcheck(dev_priv); |
1863 | ||
2e8f9d32 | 1864 | finish: |
8d613c53 | 1865 | i915_gem_reset_finish(dev_priv); |
4c965543 | 1866 | enable_irq(dev_priv->drm.irq); |
8c185eca | 1867 | |
2e8f9d32 | 1868 | wakeup: |
8c185eca CW |
1869 | clear_bit(I915_RESET_HANDOFF, &error->flags); |
1870 | wake_up_bit(&error->flags, I915_RESET_HANDOFF); | |
780f262a | 1871 | return; |
d98c52cf CW |
1872 | |
1873 | error: | |
821ed7df | 1874 | i915_gem_set_wedged(dev_priv); |
2e8f9d32 | 1875 | goto finish; |
11ed50ec BG |
1876 | } |
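/*
 * For orientation: the hang-detection and error-handling code (outside this
 * excerpt) is what sets I915_RESET_BACKOFF, takes struct_mutex and raises
 * I915_RESET_HANDOFF before calling i915_reset(). The HANDOFF test at the
 * top of the function therefore lets a call whose reset has already been
 * completed elsewhere return as a no-op.
 */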
1877 | ||
c49d13ee | 1878 | static int i915_pm_suspend(struct device *kdev) |
112b715e | 1879 | { |
c49d13ee DW |
1880 | struct pci_dev *pdev = to_pci_dev(kdev); |
1881 | struct drm_device *dev = pci_get_drvdata(pdev); | |
112b715e | 1882 | |
c49d13ee DW |
1883 | if (!dev) { |
1884 | dev_err(kdev, "DRM not initialized, aborting suspend.\n"); | |
84b79f8d RW |
1885 | return -ENODEV; |
1886 | } | |
112b715e | 1887 | |
c49d13ee | 1888 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
5bcf719b DA |
1889 | return 0; |
1890 | ||
c49d13ee | 1891 | return i915_drm_suspend(dev); |
76c4b250 ID |
1892 | } |
1893 | ||
c49d13ee | 1894 | static int i915_pm_suspend_late(struct device *kdev) |
76c4b250 | 1895 | { |
c49d13ee | 1896 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
76c4b250 ID |
1897 | |
1898 | /* | |
c965d995 | 1899 | * We have a suspend ordering issue with the snd-hda driver also |
76c4b250 ID |
1900 | * requiring our device to be powered up. Due to the lack of a
1901 | * parent/child relationship we currently solve this with a late
1902 | * suspend hook. | |
1903 | * | |
1904 | * FIXME: This should be solved with a special hdmi sink device or | |
1905 | * similar so that power domains can be employed. | |
1906 | */ | |
c49d13ee | 1907 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
76c4b250 | 1908 | return 0; |
112b715e | 1909 | |
c49d13ee | 1910 | return i915_drm_suspend_late(dev, false); |
ab3be73f ID |
1911 | } |
1912 | ||
c49d13ee | 1913 | static int i915_pm_poweroff_late(struct device *kdev) |
ab3be73f | 1914 | { |
c49d13ee | 1915 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
ab3be73f | 1916 | |
c49d13ee | 1917 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
ab3be73f ID |
1918 | return 0; |
1919 | ||
c49d13ee | 1920 | return i915_drm_suspend_late(dev, true); |
cbda12d7 ZW |
1921 | } |
1922 | ||
c49d13ee | 1923 | static int i915_pm_resume_early(struct device *kdev) |
76c4b250 | 1924 | { |
c49d13ee | 1925 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
76c4b250 | 1926 | |
c49d13ee | 1927 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
097dd837 ID |
1928 | return 0; |
1929 | ||
c49d13ee | 1930 | return i915_drm_resume_early(dev); |
76c4b250 ID |
1931 | } |
1932 | ||
c49d13ee | 1933 | static int i915_pm_resume(struct device *kdev) |
cbda12d7 | 1934 | { |
c49d13ee | 1935 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
84b79f8d | 1936 | |
c49d13ee | 1937 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
097dd837 ID |
1938 | return 0; |
1939 | ||
c49d13ee | 1940 | return i915_drm_resume(dev); |
cbda12d7 ZW |
1941 | } |
1942 | ||
1f19ac2a | 1943 | /* freeze: before creating the hibernation_image */ |
c49d13ee | 1944 | static int i915_pm_freeze(struct device *kdev) |
1f19ac2a | 1945 | { |
6a800eab CW |
1946 | int ret; |
1947 | ||
1948 | ret = i915_pm_suspend(kdev); | |
1949 | if (ret) | |
1950 | return ret; | |
1951 | ||
1952 | ret = i915_gem_freeze(kdev_to_i915(kdev)); | |
1953 | if (ret) | |
1954 | return ret; | |
1955 | ||
1956 | return 0; | |
1f19ac2a CW |
1957 | } |
1958 | ||
c49d13ee | 1959 | static int i915_pm_freeze_late(struct device *kdev) |
1f19ac2a | 1960 | { |
461fb99c CW |
1961 | int ret; |
1962 | ||
c49d13ee | 1963 | ret = i915_pm_suspend_late(kdev); |
461fb99c CW |
1964 | if (ret) |
1965 | return ret; | |
1966 | ||
c49d13ee | 1967 | ret = i915_gem_freeze_late(kdev_to_i915(kdev)); |
461fb99c CW |
1968 | if (ret) |
1969 | return ret; | |
1970 | ||
1971 | return 0; | |
1f19ac2a CW |
1972 | } |
1973 | ||
1974 | /* thaw: called after creating the hibernation image, but before turning off. */ | |
c49d13ee | 1975 | static int i915_pm_thaw_early(struct device *kdev) |
1f19ac2a | 1976 | { |
c49d13ee | 1977 | return i915_pm_resume_early(kdev); |
1f19ac2a CW |
1978 | } |
1979 | ||
c49d13ee | 1980 | static int i915_pm_thaw(struct device *kdev) |
1f19ac2a | 1981 | { |
c49d13ee | 1982 | return i915_pm_resume(kdev); |
1f19ac2a CW |
1983 | } |
1984 | ||
1985 | /* restore: called after loading the hibernation image. */ | |
c49d13ee | 1986 | static int i915_pm_restore_early(struct device *kdev) |
1f19ac2a | 1987 | { |
c49d13ee | 1988 | return i915_pm_resume_early(kdev); |
1f19ac2a CW |
1989 | } |
1990 | ||
c49d13ee | 1991 | static int i915_pm_restore(struct device *kdev) |
1f19ac2a | 1992 | { |
c49d13ee | 1993 | return i915_pm_resume(kdev); |
1f19ac2a CW |
1994 | } |
1995 | ||
ddeea5b0 ID |
1996 | /* |
1997 | * Save all Gunit registers that may be lost after a D3 and a subsequent | |
1998 | * S0i[R123] transition. The list of registers needing a save/restore is | |
1999 | * defined in the VLV2_S0IXRegs document. This document marks all Gunit
2000 | * registers in the following way: | |
2001 | * - Driver: saved/restored by the driver | |
2002 | * - Punit : saved/restored by the Punit firmware | |
2003 | * - No, w/o marking: no need to save/restore, since the register is R/O or | |
2004 | * used internally by the HW in a way that doesn't depend on
2005 | * keeping the content across a suspend/resume. | |
2006 | * - Debug : used for debugging | |
2007 | * | |
2008 | * We save/restore all registers marked with 'Driver', with the following | |
2009 | * exceptions: | |
2010 | * - Registers out of use, including also registers marked with 'Debug'. | |
2011 | * These have no effect on the driver's operation, so we don't save/restore | |
2012 | * them to reduce the overhead. | |
2013 | * - Registers that are fully setup by an initialization function called from | |
2014 | * the resume path. For example many clock gating and RPS/RC6 registers. | |
2015 | * - Registers that provide the right functionality with their reset defaults. | |
2016 | * | |
2017 | * TODO: Except for registers that based on the above 3 criteria can be safely | |
2018 | * ignored, we save/restore all others, practically treating the HW context as | |
2019 | * a black-box for the driver. Further investigation is needed to reduce the | |
2020 | * saved/restored registers even further, by following the same 3 criteria. | |
2021 | */ | |
2022 | static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) | |
2023 | { | |
2024 | struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; | |
2025 | int i; | |
2026 | ||
2027 | /* GAM 0x4000-0x4770 */ | |
2028 | s->wr_watermark = I915_READ(GEN7_WR_WATERMARK); | |
2029 | s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL); | |
2030 | s->arb_mode = I915_READ(ARB_MODE); | |
2031 | s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0); | |
2032 | s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1); | |
2033 | ||
2034 | for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) | |
22dfe79f | 2035 | s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i)); |
ddeea5b0 ID |
2036 | |
2037 | s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); | |
b5f1c97f | 2038 | s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT); |
ddeea5b0 ID |
2039 | |
2040 | s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7); | |
2041 | s->ecochk = I915_READ(GAM_ECOCHK); | |
2042 | s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7); | |
2043 | s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7); | |
2044 | ||
2045 | s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR); | |
2046 | ||
2047 | /* MBC 0x9024-0x91D0, 0x8500 */ | |
2048 | s->g3dctl = I915_READ(VLV_G3DCTL); | |
2049 | s->gsckgctl = I915_READ(VLV_GSCKGCTL); | |
2050 | s->mbctl = I915_READ(GEN6_MBCTL); | |
2051 | ||
2052 | /* GCP 0x9400-0x9424, 0x8100-0x810C */ | |
2053 | s->ucgctl1 = I915_READ(GEN6_UCGCTL1); | |
2054 | s->ucgctl3 = I915_READ(GEN6_UCGCTL3); | |
2055 | s->rcgctl1 = I915_READ(GEN6_RCGCTL1); | |
2056 | s->rcgctl2 = I915_READ(GEN6_RCGCTL2); | |
2057 | s->rstctl = I915_READ(GEN6_RSTCTL); | |
2058 | s->misccpctl = I915_READ(GEN7_MISCCPCTL); | |
2059 | ||
2060 | /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ | |
2061 | s->gfxpause = I915_READ(GEN6_GFXPAUSE); | |
2062 | s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC); | |
2063 | s->rpdeuc = I915_READ(GEN6_RPDEUC); | |
2064 | s->ecobus = I915_READ(ECOBUS); | |
2065 | s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL); | |
2066 | s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT); | |
2067 | s->rp_deucsw = I915_READ(GEN6_RPDEUCSW); | |
2068 | s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR); | |
2069 | s->rcedata = I915_READ(VLV_RCEDATA); | |
2070 | s->spare2gh = I915_READ(VLV_SPAREG2H); | |
2071 | ||
2072 | /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ | |
2073 | s->gt_imr = I915_READ(GTIMR); | |
2074 | s->gt_ier = I915_READ(GTIER); | |
2075 | s->pm_imr = I915_READ(GEN6_PMIMR); | |
2076 | s->pm_ier = I915_READ(GEN6_PMIER); | |
2077 | ||
2078 | for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) | |
22dfe79f | 2079 | s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i)); |
ddeea5b0 ID |
2080 | |
2081 | /* GT SA CZ domain, 0x100000-0x138124 */ | |
2082 | s->tilectl = I915_READ(TILECTL); | |
2083 | s->gt_fifoctl = I915_READ(GTFIFOCTL); | |
2084 | s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL); | |
2085 | s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG); | |
2086 | s->pmwgicz = I915_READ(VLV_PMWGICZ); | |
2087 | ||
2088 | /* Gunit-Display CZ domain, 0x182028-0x1821CF */ | |
2089 | s->gu_ctl0 = I915_READ(VLV_GU_CTL0); | |
2090 | s->gu_ctl1 = I915_READ(VLV_GU_CTL1); | |
9c25210f | 2091 | s->pcbr = I915_READ(VLV_PCBR); |
ddeea5b0 ID |
2092 | s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2); |
2093 | ||
2094 | /* | |
2095 | * Not saving any of: | |
2096 | * DFT, 0x9800-0x9EC0 | |
2097 | * SARB, 0xB000-0xB1FC | |
2098 | * GAC, 0x5208-0x524C, 0x14000-0x14C000 | |
2099 | * PCI CFG | |
2100 | */ | |
2101 | } | |
2102 | ||
2103 | static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) | |
2104 | { | |
2105 | struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; | |
2106 | u32 val; | |
2107 | int i; | |
2108 | ||
2109 | /* GAM 0x4000-0x4770 */ | |
2110 | I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark); | |
2111 | I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl); | |
2112 | I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16)); | |
2113 | I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0); | |
2114 | I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1); | |
2115 | ||
2116 | for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) | |
22dfe79f | 2117 | I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]); |
ddeea5b0 ID |
2118 | |
2119 | I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); | |
b5f1c97f | 2120 | I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count); |
ddeea5b0 ID |
2121 | |
2122 | I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp); | |
2123 | I915_WRITE(GAM_ECOCHK, s->ecochk); | |
2124 | I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp); | |
2125 | I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp); | |
2126 | ||
2127 | I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr); | |
2128 | ||
2129 | /* MBC 0x9024-0x91D0, 0x8500 */ | |
2130 | I915_WRITE(VLV_G3DCTL, s->g3dctl); | |
2131 | I915_WRITE(VLV_GSCKGCTL, s->gsckgctl); | |
2132 | I915_WRITE(GEN6_MBCTL, s->mbctl); | |
2133 | ||
2134 | /* GCP 0x9400-0x9424, 0x8100-0x810C */ | |
2135 | I915_WRITE(GEN6_UCGCTL1, s->ucgctl1); | |
2136 | I915_WRITE(GEN6_UCGCTL3, s->ucgctl3); | |
2137 | I915_WRITE(GEN6_RCGCTL1, s->rcgctl1); | |
2138 | I915_WRITE(GEN6_RCGCTL2, s->rcgctl2); | |
2139 | I915_WRITE(GEN6_RSTCTL, s->rstctl); | |
2140 | I915_WRITE(GEN7_MISCCPCTL, s->misccpctl); | |
2141 | ||
2142 | /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ | |
2143 | I915_WRITE(GEN6_GFXPAUSE, s->gfxpause); | |
2144 | I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc); | |
2145 | I915_WRITE(GEN6_RPDEUC, s->rpdeuc); | |
2146 | I915_WRITE(ECOBUS, s->ecobus); | |
2147 | I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl); | |
2148 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
2149 | I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw); | |
2150 | I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr); | |
2151 | I915_WRITE(VLV_RCEDATA, s->rcedata); | |
2152 | I915_WRITE(VLV_SPAREG2H, s->spare2gh); | |
2153 | ||
2154 | /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ | |
2155 | I915_WRITE(GTIMR, s->gt_imr); | |
2156 | I915_WRITE(GTIER, s->gt_ier); | |
2157 | I915_WRITE(GEN6_PMIMR, s->pm_imr); | |
2158 | I915_WRITE(GEN6_PMIER, s->pm_ier); | |
2159 | ||
2160 | for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) | |
22dfe79f | 2161 | I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]); |
ddeea5b0 ID |
2162 | |
2163 | /* GT SA CZ domain, 0x100000-0x138124 */ | |
2164 | I915_WRITE(TILECTL, s->tilectl); | |
2165 | I915_WRITE(GTFIFOCTL, s->gt_fifoctl); | |
2166 | /* | |
2167 | * Preserve the GT allow wake and GFX force clock bit, they are not to
2168 | * be restored, as they are used to control the s0ix suspend/resume | |
2169 | * sequence by the caller. | |
2170 | */ | |
2171 | val = I915_READ(VLV_GTLC_WAKE_CTRL); | |
2172 | val &= VLV_GTLC_ALLOWWAKEREQ; | |
2173 | val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ; | |
2174 | I915_WRITE(VLV_GTLC_WAKE_CTRL, val); | |
2175 | ||
2176 | val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); | |
2177 | val &= VLV_GFX_CLK_FORCE_ON_BIT; | |
2178 | val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT; | |
2179 | I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); | |
2180 | ||
2181 | I915_WRITE(VLV_PMWGICZ, s->pmwgicz); | |
2182 | ||
2183 | /* Gunit-Display CZ domain, 0x182028-0x1821CF */ | |
2184 | I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); | |
2185 | I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); | |
9c25210f | 2186 | I915_WRITE(VLV_PCBR, s->pcbr); |
ddeea5b0 ID |
2187 | I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); |
2188 | } | |
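/*
 * Note that this save/restore pair is only used on Valleyview proper; the
 * IS_CHERRYVIEW() checks in vlv_suspend_complete() and vlv_resume_prepare()
 * below skip it on Cherryview.
 */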
2189 | ||
3dd14c04 CW |
2190 | static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv, |
2191 | u32 mask, u32 val) | |
2192 | { | |
2193 | /* The HW does not like us polling for PW_STATUS frequently, so | |
2194 | * use the sleeping loop rather than risk the busy spin within | |
2195 | * intel_wait_for_register(). | |
2196 | * | |
2197 | * Transitioning between RC6 states should be at most 2ms (see | |
2198 | * valleyview_enable_rps) so use a 3ms timeout. | |
2199 | */ | |
2200 | return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val, | |
2201 | 3); | |
2202 | } | |
2203 | ||
650ad970 ID |
2204 | int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) |
2205 | { | |
2206 | u32 val; | |
2207 | int err; | |
2208 | ||
650ad970 ID |
2209 | val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); |
2210 | val &= ~VLV_GFX_CLK_FORCE_ON_BIT; | |
2211 | if (force_on) | |
2212 | val |= VLV_GFX_CLK_FORCE_ON_BIT; | |
2213 | I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); | |
2214 | ||
2215 | if (!force_on) | |
2216 | return 0; | |
2217 | ||
c6ddc5f3 CW |
2218 | err = intel_wait_for_register(dev_priv, |
2219 | VLV_GTLC_SURVIVABILITY_REG, | |
2220 | VLV_GFX_CLK_STATUS_BIT, | |
2221 | VLV_GFX_CLK_STATUS_BIT, | |
2222 | 20); | |
650ad970 ID |
2223 | if (err) |
2224 | DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n", | |
2225 | I915_READ(VLV_GTLC_SURVIVABILITY_REG)); | |
2226 | ||
2227 | return err; | |
650ad970 ID |
2228 | } |
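/*
 * Callers bracket access to the Gunit save/restore registers with this
 * helper: vlv_suspend_complete() and vlv_resume_prepare() below force the
 * GFX clock on, save or restore the state, and then drop the force again.
 */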
2229 | ||
ddeea5b0 ID |
2230 | static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) |
2231 | { | |
3dd14c04 | 2232 | u32 mask; |
ddeea5b0 | 2233 | u32 val; |
3dd14c04 | 2234 | int err; |
ddeea5b0 ID |
2235 | |
2236 | val = I915_READ(VLV_GTLC_WAKE_CTRL); | |
2237 | val &= ~VLV_GTLC_ALLOWWAKEREQ; | |
2238 | if (allow) | |
2239 | val |= VLV_GTLC_ALLOWWAKEREQ; | |
2240 | I915_WRITE(VLV_GTLC_WAKE_CTRL, val); | |
2241 | POSTING_READ(VLV_GTLC_WAKE_CTRL); | |
2242 | ||
3dd14c04 CW |
2243 | mask = VLV_GTLC_ALLOWWAKEACK; |
2244 | val = allow ? mask : 0; | |
2245 | ||
2246 | err = vlv_wait_for_pw_status(dev_priv, mask, val); | |
ddeea5b0 ID |
2247 | if (err) |
2248 | DRM_ERROR("timeout disabling GT waking\n"); | |
b2736695 | 2249 | |
ddeea5b0 | 2250 | return err; |
ddeea5b0 ID |
2251 | } |
2252 | ||
3dd14c04 CW |
2253 | static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, |
2254 | bool wait_for_on) | |
ddeea5b0 ID |
2255 | { |
2256 | u32 mask; | |
2257 | u32 val; | |
ddeea5b0 ID |
2258 | |
2259 | mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; | |
2260 | val = wait_for_on ? mask : 0; | |
ddeea5b0 ID |
2261 | |
2262 | /* | |
2263 | * RC6 transitioning can be delayed up to 2 msec (see | |
2264 | * valleyview_enable_rps), use 3 msec for safety. | |
2265 | */ | |
3dd14c04 | 2266 | if (vlv_wait_for_pw_status(dev_priv, mask, val)) |
ddeea5b0 | 2267 | DRM_ERROR("timeout waiting for GT wells to go %s\n", |
87ad3212 | 2268 | onoff(wait_for_on)); |
ddeea5b0 ID |
2269 | } |
2270 | ||
2271 | static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) | |
2272 | { | |
2273 | if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR)) | |
2274 | return; | |
2275 | ||
6fa283b0 | 2276 | DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n"); |
ddeea5b0 ID |
2277 | I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); |
2278 | } | |
2279 | ||
ebc32824 | 2280 | static int vlv_suspend_complete(struct drm_i915_private *dev_priv) |
ddeea5b0 ID |
2281 | { |
2282 | u32 mask; | |
2283 | int err; | |
2284 | ||
2285 | /* | |
2286 | * Bspec defines the following GT well-on flags as debug only, so
2287 | * don't treat them as hard failures. | |
2288 | */ | |
3dd14c04 | 2289 | vlv_wait_for_gt_wells(dev_priv, false); |
ddeea5b0 ID |
2290 | |
2291 | mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS; | |
2292 | WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask); | |
2293 | ||
2294 | vlv_check_no_gt_access(dev_priv); | |
2295 | ||
2296 | err = vlv_force_gfx_clock(dev_priv, true); | |
2297 | if (err) | |
2298 | goto err1; | |
2299 | ||
2300 | err = vlv_allow_gt_wake(dev_priv, false); | |
2301 | if (err) | |
2302 | goto err2; | |
98711167 | 2303 | |
2d1fe073 | 2304 | if (!IS_CHERRYVIEW(dev_priv)) |
98711167 | 2305 | vlv_save_gunit_s0ix_state(dev_priv); |
ddeea5b0 ID |
2306 | |
2307 | err = vlv_force_gfx_clock(dev_priv, false); | |
2308 | if (err) | |
2309 | goto err2; | |
2310 | ||
2311 | return 0; | |
2312 | ||
2313 | err2: | |
2314 | /* For safety always re-enable waking and disable gfx clock forcing */ | |
2315 | vlv_allow_gt_wake(dev_priv, true); | |
2316 | err1: | |
2317 | vlv_force_gfx_clock(dev_priv, false); | |
2318 | ||
2319 | return err; | |
2320 | } | |
2321 | ||
016970be SK |
2322 | static int vlv_resume_prepare(struct drm_i915_private *dev_priv, |
2323 | bool rpm_resume) | |
ddeea5b0 | 2324 | { |
ddeea5b0 ID |
2325 | int err; |
2326 | int ret; | |
2327 | ||
2328 | /* | |
2329 | * If any of the steps fail, just try to continue; that's the best we
2330 | * can do at this point. Return the first error code (which will also | |
2331 | * leave RPM permanently disabled). | |
2332 | */ | |
2333 | ret = vlv_force_gfx_clock(dev_priv, true); | |
2334 | ||
2d1fe073 | 2335 | if (!IS_CHERRYVIEW(dev_priv)) |
98711167 | 2336 | vlv_restore_gunit_s0ix_state(dev_priv); |
ddeea5b0 ID |
2337 | |
2338 | err = vlv_allow_gt_wake(dev_priv, true); | |
2339 | if (!ret) | |
2340 | ret = err; | |
2341 | ||
2342 | err = vlv_force_gfx_clock(dev_priv, false); | |
2343 | if (!ret) | |
2344 | ret = err; | |
2345 | ||
2346 | vlv_check_no_gt_access(dev_priv); | |
2347 | ||
7c108fd8 | 2348 | if (rpm_resume) |
46f16e63 | 2349 | intel_init_clock_gating(dev_priv); |
ddeea5b0 ID |
2350 | |
2351 | return ret; | |
2352 | } | |
2353 | ||
c49d13ee | 2354 | static int intel_runtime_suspend(struct device *kdev) |
8a187455 | 2355 | { |
c49d13ee | 2356 | struct pci_dev *pdev = to_pci_dev(kdev); |
8a187455 | 2357 | struct drm_device *dev = pci_get_drvdata(pdev); |
fac5e23e | 2358 | struct drm_i915_private *dev_priv = to_i915(dev); |
0ab9cfeb | 2359 | int ret; |
8a187455 | 2360 | |
dc97997a | 2361 | if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6()))) |
c6df39b5 ID |
2362 | return -ENODEV; |
2363 | ||
6772ffe0 | 2364 | if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv))) |
604effb7 ID |
2365 | return -ENODEV; |
2366 | ||
8a187455 PZ |
2367 | DRM_DEBUG_KMS("Suspending device\n"); |
2368 | ||
1f814dac ID |
2369 | disable_rpm_wakeref_asserts(dev_priv); |
2370 | ||
d6102977 ID |
2371 | /* |
2372 | * We are safe here against re-faults, since the fault handler takes | |
2373 | * an RPM reference. | |
2374 | */ | |
7c108fd8 | 2375 | i915_gem_runtime_suspend(dev_priv); |
d6102977 | 2376 | |
bf9e8429 | 2377 | intel_guc_suspend(dev_priv); |
a1c41994 | 2378 | |
2eb5252e | 2379 | intel_runtime_pm_disable_interrupts(dev_priv); |
b5478bcd | 2380 | |
507e126e | 2381 | ret = 0; |
b9fd799e | 2382 | if (IS_GEN9_LP(dev_priv)) { |
507e126e ID |
2383 | bxt_display_core_uninit(dev_priv); |
2384 | bxt_enable_dc9(dev_priv); | |
2385 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { | |
2386 | hsw_enable_pc8(dev_priv); | |
2387 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | |
2388 | ret = vlv_suspend_complete(dev_priv); | |
2389 | } | |
2390 | ||
0ab9cfeb ID |
2391 | if (ret) { |
2392 | DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); | |
b963291c | 2393 | intel_runtime_pm_enable_interrupts(dev_priv); |
0ab9cfeb | 2394 | |
1f814dac ID |
2395 | enable_rpm_wakeref_asserts(dev_priv); |
2396 | ||
0ab9cfeb ID |
2397 | return ret; |
2398 | } | |
a8a8bd54 | 2399 | |
68f60946 | 2400 | intel_uncore_suspend(dev_priv); |
1f814dac ID |
2401 | |
2402 | enable_rpm_wakeref_asserts(dev_priv); | |
2403 | WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); | |
55ec45c2 | 2404 | |
bc3b9346 | 2405 | if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv)) |
55ec45c2 MK |
2406 | DRM_ERROR("Unclaimed access detected prior to suspending\n"); |
2407 | ||
8a187455 | 2408 | dev_priv->pm.suspended = true; |
1fb2362b KCA |
2409 | |
2410 | /* | |
c8a0bd42 PZ |
2411 | * FIXME: We really should find a document that references the arguments |
2412 | * used below! | |
1fb2362b | 2413 | */ |
6f9f4b7a | 2414 | if (IS_BROADWELL(dev_priv)) { |
d37ae19a PZ |
2415 | /* |
2416 | * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop | |
2417 | * being detected, and the call we do at intel_runtime_resume() | |
2418 | * won't be able to restore them. Since PCI_D3hot matches the | |
2419 | * actual specification and appears to be working, use it. | |
2420 | */ | |
6f9f4b7a | 2421 | intel_opregion_notify_adapter(dev_priv, PCI_D3hot); |
d37ae19a | 2422 | } else { |
c8a0bd42 PZ |
2423 | /* |
2424 | * Current versions of firmware which depend on this opregion
2425 | * notification have repurposed the D1 definition to mean | |
2426 | * "runtime suspended" vs. what you would normally expect (D3) | |
2427 | * to distinguish it from notifications that might be sent via | |
2428 | * the suspend path. | |
2429 | */ | |
6f9f4b7a | 2430 | intel_opregion_notify_adapter(dev_priv, PCI_D1); |
c8a0bd42 | 2431 | } |
8a187455 | 2432 | |
59bad947 | 2433 | assert_forcewakes_inactive(dev_priv); |
dc9fb09c | 2434 | |
21d6e0bd | 2435 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
19625e85 L |
2436 | intel_hpd_poll_init(dev_priv); |
2437 | ||
a8a8bd54 | 2438 | DRM_DEBUG_KMS("Device suspended\n"); |
8a187455 PZ |
2439 | return 0; |
2440 | } | |
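/*
 * Runtime suspend is entered by the PM core once the driver's last runtime
 * PM reference is dropped, roughly intel_runtime_pm_put() ->
 * pm_runtime_put_autosuspend() in the rest of the driver (those helpers are
 * not part of this excerpt); the wakeref_count assertions here and in
 * intel_runtime_resume() cross-check that bookkeeping.
 */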
2441 | ||
c49d13ee | 2442 | static int intel_runtime_resume(struct device *kdev) |
8a187455 | 2443 | { |
c49d13ee | 2444 | struct pci_dev *pdev = to_pci_dev(kdev); |
8a187455 | 2445 | struct drm_device *dev = pci_get_drvdata(pdev); |
fac5e23e | 2446 | struct drm_i915_private *dev_priv = to_i915(dev); |
1a5df187 | 2447 | int ret = 0; |
8a187455 | 2448 | |
6772ffe0 | 2449 | if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv))) |
604effb7 | 2450 | return -ENODEV; |
8a187455 PZ |
2451 | |
2452 | DRM_DEBUG_KMS("Resuming device\n"); | |
2453 | ||
1f814dac ID |
2454 | WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); |
2455 | disable_rpm_wakeref_asserts(dev_priv); | |
2456 | ||
6f9f4b7a | 2457 | intel_opregion_notify_adapter(dev_priv, PCI_D0); |
8a187455 | 2458 | dev_priv->pm.suspended = false; |
55ec45c2 MK |
2459 | if (intel_uncore_unclaimed_mmio(dev_priv)) |
2460 | DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); | |
8a187455 | 2461 | |
bf9e8429 | 2462 | intel_guc_resume(dev_priv); |
a1c41994 | 2463 | |
1a5df187 | 2464 | if (IS_GEN6(dev_priv)) |
c39055b0 | 2465 | intel_init_pch_refclk(dev_priv); |
31335cec | 2466 | |
b9fd799e | 2467 | if (IS_GEN9_LP(dev_priv)) { |
507e126e ID |
2468 | bxt_disable_dc9(dev_priv); |
2469 | bxt_display_core_init(dev_priv, true); | |
f62c79b3 ID |
2470 | if (dev_priv->csr.dmc_payload && |
2471 | (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) | |
2472 | gen9_enable_dc5(dev_priv); | |
507e126e | 2473 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
1a5df187 | 2474 | hsw_disable_pc8(dev_priv); |
507e126e | 2475 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
1a5df187 | 2476 | ret = vlv_resume_prepare(dev_priv, true); |
507e126e | 2477 | } |
1a5df187 | 2478 | |
0ab9cfeb ID |
2479 | /* |
2480 | * No point in rolling back things in case of an error, as the best
2481 | * we can do is to hope that things will still work (and disable RPM). | |
2482 | */ | |
c6be607a | 2483 | i915_gem_init_swizzling(dev_priv); |
83bf6d55 | 2484 | i915_gem_restore_fences(dev_priv); |
92b806d3 | 2485 | |
b963291c | 2486 | intel_runtime_pm_enable_interrupts(dev_priv); |
08d8a232 VS |
2487 | |
2488 | /* | |
2489 | * On VLV/CHV display interrupts are part of the display | |
2490 | * power well, so hpd is reinitialized from there. For | |
2491 | * everyone else do it here. | |
2492 | */ | |
666a4537 | 2493 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
08d8a232 VS |
2494 | intel_hpd_init(dev_priv); |
2495 | ||
1f814dac ID |
2496 | enable_rpm_wakeref_asserts(dev_priv); |
2497 | ||
0ab9cfeb ID |
2498 | if (ret) |
2499 | DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); | |
2500 | else | |
2501 | DRM_DEBUG_KMS("Device resumed\n"); | |
2502 | ||
2503 | return ret; | |
8a187455 PZ |
2504 | } |
2505 | ||
42f5551d | 2506 | const struct dev_pm_ops i915_pm_ops = { |
5545dbbf ID |
2507 | /* |
2508 | * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, | |
2509 | * PMSG_RESUME] | |
2510 | */ | |
0206e353 | 2511 | .suspend = i915_pm_suspend, |
76c4b250 ID |
2512 | .suspend_late = i915_pm_suspend_late, |
2513 | .resume_early = i915_pm_resume_early, | |
0206e353 | 2514 | .resume = i915_pm_resume, |
5545dbbf ID |
2515 | |
2516 | /* | |
2517 | * S4 event handlers | |
2518 | * @freeze, @freeze_late : called (1) before creating the | |
2519 | * hibernation image [PMSG_FREEZE] and | |
2520 | * (2) after rebooting, before restoring | |
2521 | * the image [PMSG_QUIESCE] | |
2522 | * @thaw, @thaw_early : called (1) after creating the hibernation | |
2523 | * image, before writing it [PMSG_THAW] | |
2524 | * and (2) after failing to create or | |
2525 | * restore the image [PMSG_RECOVER] | |
2526 | * @poweroff, @poweroff_late: called after writing the hibernation | |
2527 | * image, before rebooting [PMSG_HIBERNATE] | |
2528 | * @restore, @restore_early : called after rebooting and restoring the | |
2529 | * hibernation image [PMSG_RESTORE] | |
2530 | */ | |
1f19ac2a CW |
2531 | .freeze = i915_pm_freeze, |
2532 | .freeze_late = i915_pm_freeze_late, | |
2533 | .thaw_early = i915_pm_thaw_early, | |
2534 | .thaw = i915_pm_thaw, | |
36d61e67 | 2535 | .poweroff = i915_pm_suspend, |
ab3be73f | 2536 | .poweroff_late = i915_pm_poweroff_late, |
1f19ac2a CW |
2537 | .restore_early = i915_pm_restore_early, |
2538 | .restore = i915_pm_restore, | |
5545dbbf ID |
2539 | |
2540 | /* S0ix (via runtime suspend) event handlers */ | |
97bea207 PZ |
2541 | .runtime_suspend = intel_runtime_suspend, |
2542 | .runtime_resume = intel_runtime_resume, | |
cbda12d7 ZW |
2543 | }; |
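/*
 * i915_pm_ops is not used directly in this file; it is attached to the PCI
 * driver, roughly along these lines in i915_pci.c:
 *
 *	static struct pci_driver i915_pci_driver = {
 *		.name = DRIVER_NAME,
 *		.id_table = pciidlist,
 *		.probe = i915_pci_probe,
 *		.remove = i915_pci_remove,
 *		.driver.pm = &i915_pm_ops,
 *	};
 *
 * so the PM core, not this file, decides which of the handlers above runs
 * for a given PM event.
 */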
2544 | ||
78b68556 | 2545 | static const struct vm_operations_struct i915_gem_vm_ops = { |
de151cf6 | 2546 | .fault = i915_gem_fault, |
ab00b3e5 JB |
2547 | .open = drm_gem_vm_open, |
2548 | .close = drm_gem_vm_close, | |
de151cf6 JB |
2549 | }; |
2550 | ||
e08e96de AV |
2551 | static const struct file_operations i915_driver_fops = { |
2552 | .owner = THIS_MODULE, | |
2553 | .open = drm_open, | |
2554 | .release = drm_release, | |
2555 | .unlocked_ioctl = drm_ioctl, | |
2556 | .mmap = drm_gem_mmap, | |
2557 | .poll = drm_poll, | |
e08e96de | 2558 | .read = drm_read, |
e08e96de | 2559 | .compat_ioctl = i915_compat_ioctl, |
e08e96de AV |
2560 | .llseek = noop_llseek, |
2561 | }; | |
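/*
 * These file operations are installed via the drm_driver definition at the
 * bottom of this file (.fops = &i915_driver_fops); drm_ioctl() then routes
 * device-specific ioctls to the i915_ioctls[] table below.
 */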
2562 | ||
0673ad47 CW |
2563 | static int |
2564 | i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data, | |
2565 | struct drm_file *file) | |
2566 | { | |
2567 | return -ENODEV; | |
2568 | } | |
2569 | ||
2570 | static const struct drm_ioctl_desc i915_ioctls[] = { | |
2571 | DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2572 | DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH), | |
2573 | DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH), | |
2574 | DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), | |
2575 | DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), | |
2576 | DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), | |
2577 | DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), | |
2578 | DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2579 | DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), | |
2580 | DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), | |
2581 | DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2582 | DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), | |
2583 | DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2584 | DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2585 | DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), | |
2586 | DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), | |
2587 | DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2588 | DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2589 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), | |
fec0445c | 2590 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW), |
0673ad47 CW |
2591 | DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), |
2592 | DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), | |
2593 | DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), | |
2594 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW), | |
2595 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW), | |
2596 | DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), | |
2597 | DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2598 | DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2599 | DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW), | |
2600 | DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), | |
2601 | DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), | |
2602 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), | |
2603 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW), | |
2604 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), | |
2605 | DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW), | |
111dbcab CW |
2606 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW), |
2607 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW), | |
0673ad47 CW |
2608 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), |
2609 | DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), | |
2610 | DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), | |
2611 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), | |
2612 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), | |
2613 | DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), | |
2614 | DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), | |
2615 | DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), | |
2616 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), | |
2617 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), | |
2618 | DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), | |
2619 | DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), | |
2620 | DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), | |
2621 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), | |
2622 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), | |
eec688e1 | 2623 | DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW), |
0673ad47 CW |
2624 | }; |
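/*
 * For orientation, a minimal userspace sketch that ends up in i915_getparam
 * above (assuming libdrm's drmIoctl() and the uapi i915_drm.h definitions;
 * "fd" is an already opened DRM device node):
 *
 *	#include <xf86drm.h>
 *	#include <i915_drm.h>
 *
 *	int chipset_id = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_CHIPSET_ID,
 *		.value = &chipset_id,
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */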
2625 | ||
1da177e4 | 2626 | static struct drm_driver driver = { |
0c54781b MW |
2627 | /* Don't use MTRRs here; the Xserver or userspace app should |
2628 | * deal with them for Intel hardware. | |
792d2b9a | 2629 | */ |
673a394b | 2630 | .driver_features = |
10ba5012 | 2631 | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME | |
8d2b47dd | 2632 | DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC, |
cad3688f | 2633 | .release = i915_driver_release, |
673a394b | 2634 | .open = i915_driver_open, |
22eae947 | 2635 | .lastclose = i915_driver_lastclose, |
673a394b | 2636 | .postclose = i915_driver_postclose, |
915b4d11 | 2637 | .set_busid = drm_pci_set_busid, |
d8e29209 | 2638 | |
b1f788c6 | 2639 | .gem_close_object = i915_gem_close_object, |
f0cd5182 | 2640 | .gem_free_object_unlocked = i915_gem_free_object, |
de151cf6 | 2641 | .gem_vm_ops = &i915_gem_vm_ops, |
1286ff73 DV |
2642 | |
2643 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | |
2644 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | |
2645 | .gem_prime_export = i915_gem_prime_export, | |
2646 | .gem_prime_import = i915_gem_prime_import, | |
2647 | ||
ff72145b | 2648 | .dumb_create = i915_gem_dumb_create, |
da6b51d0 | 2649 | .dumb_map_offset = i915_gem_mmap_gtt, |
43387b37 | 2650 | .dumb_destroy = drm_gem_dumb_destroy, |
1da177e4 | 2651 | .ioctls = i915_ioctls, |
0673ad47 | 2652 | .num_ioctls = ARRAY_SIZE(i915_ioctls), |
e08e96de | 2653 | .fops = &i915_driver_fops, |
22eae947 DA |
2654 | .name = DRIVER_NAME, |
2655 | .desc = DRIVER_DESC, | |
2656 | .date = DRIVER_DATE, | |
2657 | .major = DRIVER_MAJOR, | |
2658 | .minor = DRIVER_MINOR, | |
2659 | .patchlevel = DRIVER_PATCHLEVEL, | |
1da177e4 | 2660 | }; |
66d9cb5d CW |
2661 | |
2662 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) | |
2663 | #include "selftests/mock_drm.c" | |
2664 | #endif |