1 | /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- | |
2 | */ | |
3 | /* | |
4 | * | |
5 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. | |
6 | * All Rights Reserved. | |
7 | * | |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | |
9 | * copy of this software and associated documentation files (the | |
10 | * "Software"), to deal in the Software without restriction, including | |
11 | * without limitation the rights to use, copy, modify, merge, publish, | |
12 | * distribute, sub license, and/or sell copies of the Software, and to | |
13 | * permit persons to whom the Software is furnished to do so, subject to | |
14 | * the following conditions: | |
15 | * | |
16 | * The above copyright notice and this permission notice (including the | |
17 | * next paragraph) shall be included in all copies or substantial portions | |
18 | * of the Software. | |
19 | * | |
20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | |
21 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
22 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. | |
23 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR | |
24 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | |
25 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | |
26 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |
27 | * | |
28 | */ | |
29 | ||
30 | #include <linux/device.h> | |
31 | #include <linux/acpi.h> | |
32 | #include <drm/drmP.h> | |
33 | #include <drm/i915_drm.h> | |
34 | #include "i915_drv.h" | |
35 | #include "i915_trace.h" | |
36 | #include "intel_drv.h" | |
37 | ||
38 | #include <linux/console.h> | |
39 | #include <linux/module.h> | |
40 | #include <linux/pm_runtime.h> | |
41 | #include <drm/drm_crtc_helper.h> | |
42 | ||
43 | static struct drm_driver driver; | |
44 | ||
45 | #define GEN_DEFAULT_PIPEOFFSETS \ | |
46 | .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ | |
47 | PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \ | |
48 | .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ | |
49 | TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \ | |
50 | .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET } | |
51 | ||
52 | #define GEN_CHV_PIPEOFFSETS \ | |
53 | .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ | |
54 | CHV_PIPE_C_OFFSET }, \ | |
55 | .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ | |
56 | CHV_TRANSCODER_C_OFFSET, }, \ | |
57 | .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \ | |
58 | CHV_PALETTE_C_OFFSET } | |
59 | ||
60 | #define CURSOR_OFFSETS \ | |
61 | .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET } | |
62 | ||
63 | #define IVB_CURSOR_OFFSETS \ | |
64 | .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET } | |
65 | ||
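/*
 * Each intel_device_info below is a static, per-platform capability
 * descriptor: generation, pipe count, feature flags and engine (ring)
 * mask, composed from the offset macros above plus platform-specific
 * overrides. The PCI ID table further down maps device IDs to these
 * structs.
 */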
66 | static const struct intel_device_info intel_i830_info = { | |
67 | .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, | |
68 | .has_overlay = 1, .overlay_needs_physical = 1, | |
69 | .ring_mask = RENDER_RING, | |
70 | GEN_DEFAULT_PIPEOFFSETS, | |
71 | CURSOR_OFFSETS, | |
72 | }; | |
73 | ||
74 | static const struct intel_device_info intel_845g_info = { | |
75 | .gen = 2, .num_pipes = 1, | |
76 | .has_overlay = 1, .overlay_needs_physical = 1, | |
77 | .ring_mask = RENDER_RING, | |
78 | GEN_DEFAULT_PIPEOFFSETS, | |
79 | CURSOR_OFFSETS, | |
80 | }; | |
81 | ||
82 | static const struct intel_device_info intel_i85x_info = { | |
83 | .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2, | |
84 | .cursor_needs_physical = 1, | |
85 | .has_overlay = 1, .overlay_needs_physical = 1, | |
86 | .has_fbc = 1, | |
87 | .ring_mask = RENDER_RING, | |
88 | GEN_DEFAULT_PIPEOFFSETS, | |
89 | CURSOR_OFFSETS, | |
90 | }; | |
91 | ||
92 | static const struct intel_device_info intel_i865g_info = { | |
93 | .gen = 2, .num_pipes = 1, | |
94 | .has_overlay = 1, .overlay_needs_physical = 1, | |
95 | .ring_mask = RENDER_RING, | |
96 | GEN_DEFAULT_PIPEOFFSETS, | |
97 | CURSOR_OFFSETS, | |
98 | }; | |
99 | ||
100 | static const struct intel_device_info intel_i915g_info = { | |
101 | .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2, | |
102 | .has_overlay = 1, .overlay_needs_physical = 1, | |
103 | .ring_mask = RENDER_RING, | |
104 | GEN_DEFAULT_PIPEOFFSETS, | |
105 | CURSOR_OFFSETS, | |
106 | }; | |
107 | static const struct intel_device_info intel_i915gm_info = { | |
108 | .gen = 3, .is_mobile = 1, .num_pipes = 2, | |
109 | .cursor_needs_physical = 1, | |
110 | .has_overlay = 1, .overlay_needs_physical = 1, | |
111 | .supports_tv = 1, | |
112 | .has_fbc = 1, | |
113 | .ring_mask = RENDER_RING, | |
114 | GEN_DEFAULT_PIPEOFFSETS, | |
115 | CURSOR_OFFSETS, | |
116 | }; | |
117 | static const struct intel_device_info intel_i945g_info = { | |
118 | .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2, | |
119 | .has_overlay = 1, .overlay_needs_physical = 1, | |
120 | .ring_mask = RENDER_RING, | |
121 | GEN_DEFAULT_PIPEOFFSETS, | |
122 | CURSOR_OFFSETS, | |
123 | }; | |
124 | static const struct intel_device_info intel_i945gm_info = { | |
125 | .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2, | |
126 | .has_hotplug = 1, .cursor_needs_physical = 1, | |
127 | .has_overlay = 1, .overlay_needs_physical = 1, | |
128 | .supports_tv = 1, | |
129 | .has_fbc = 1, | |
130 | .ring_mask = RENDER_RING, | |
131 | GEN_DEFAULT_PIPEOFFSETS, | |
132 | CURSOR_OFFSETS, | |
133 | }; | |
134 | ||
135 | static const struct intel_device_info intel_i965g_info = { | |
136 | .gen = 4, .is_broadwater = 1, .num_pipes = 2, | |
137 | .has_hotplug = 1, | |
138 | .has_overlay = 1, | |
139 | .ring_mask = RENDER_RING, | |
140 | GEN_DEFAULT_PIPEOFFSETS, | |
141 | CURSOR_OFFSETS, | |
142 | }; | |
143 | ||
144 | static const struct intel_device_info intel_i965gm_info = { | |
145 | .gen = 4, .is_crestline = 1, .num_pipes = 2, | |
146 | .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, | |
147 | .has_overlay = 1, | |
148 | .supports_tv = 1, | |
149 | .ring_mask = RENDER_RING, | |
150 | GEN_DEFAULT_PIPEOFFSETS, | |
151 | CURSOR_OFFSETS, | |
152 | }; | |
153 | ||
154 | static const struct intel_device_info intel_g33_info = { | |
155 | .gen = 3, .is_g33 = 1, .num_pipes = 2, | |
156 | .need_gfx_hws = 1, .has_hotplug = 1, | |
157 | .has_overlay = 1, | |
158 | .ring_mask = RENDER_RING, | |
159 | GEN_DEFAULT_PIPEOFFSETS, | |
160 | CURSOR_OFFSETS, | |
161 | }; | |
162 | ||
163 | static const struct intel_device_info intel_g45_info = { | |
164 | .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2, | |
165 | .has_pipe_cxsr = 1, .has_hotplug = 1, | |
166 | .ring_mask = RENDER_RING | BSD_RING, | |
167 | GEN_DEFAULT_PIPEOFFSETS, | |
168 | CURSOR_OFFSETS, | |
169 | }; | |
170 | ||
171 | static const struct intel_device_info intel_gm45_info = { | |
172 | .gen = 4, .is_g4x = 1, .num_pipes = 2, | |
173 | .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, | |
174 | .has_pipe_cxsr = 1, .has_hotplug = 1, | |
175 | .supports_tv = 1, | |
176 | .ring_mask = RENDER_RING | BSD_RING, | |
177 | GEN_DEFAULT_PIPEOFFSETS, | |
178 | CURSOR_OFFSETS, | |
179 | }; | |
180 | ||
181 | static const struct intel_device_info intel_pineview_info = { | |
182 | .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2, | |
183 | .need_gfx_hws = 1, .has_hotplug = 1, | |
184 | .has_overlay = 1, | |
185 | GEN_DEFAULT_PIPEOFFSETS, | |
186 | CURSOR_OFFSETS, | |
187 | }; | |
188 | ||
189 | static const struct intel_device_info intel_ironlake_d_info = { | |
190 | .gen = 5, .num_pipes = 2, | |
191 | .need_gfx_hws = 1, .has_hotplug = 1, | |
192 | .ring_mask = RENDER_RING | BSD_RING, | |
193 | GEN_DEFAULT_PIPEOFFSETS, | |
194 | CURSOR_OFFSETS, | |
195 | }; | |
196 | ||
197 | static const struct intel_device_info intel_ironlake_m_info = { | |
198 | .gen = 5, .is_mobile = 1, .num_pipes = 2, | |
199 | .need_gfx_hws = 1, .has_hotplug = 1, | |
200 | .has_fbc = 1, | |
201 | .ring_mask = RENDER_RING | BSD_RING, | |
202 | GEN_DEFAULT_PIPEOFFSETS, | |
203 | CURSOR_OFFSETS, | |
204 | }; | |
205 | ||
206 | static const struct intel_device_info intel_sandybridge_d_info = { | |
207 | .gen = 6, .num_pipes = 2, | |
208 | .need_gfx_hws = 1, .has_hotplug = 1, | |
209 | .has_fbc = 1, | |
210 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING, | |
211 | .has_llc = 1, | |
212 | GEN_DEFAULT_PIPEOFFSETS, | |
213 | CURSOR_OFFSETS, | |
214 | }; | |
215 | ||
216 | static const struct intel_device_info intel_sandybridge_m_info = { | |
217 | .gen = 6, .is_mobile = 1, .num_pipes = 2, | |
218 | .need_gfx_hws = 1, .has_hotplug = 1, | |
219 | .has_fbc = 1, | |
220 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING, | |
221 | .has_llc = 1, | |
222 | GEN_DEFAULT_PIPEOFFSETS, | |
223 | CURSOR_OFFSETS, | |
224 | }; | |
225 | ||
226 | #define GEN7_FEATURES \ | |
227 | .gen = 7, .num_pipes = 3, \ | |
228 | .need_gfx_hws = 1, .has_hotplug = 1, \ | |
229 | .has_fbc = 1, \ | |
230 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ | |
231 | .has_llc = 1 | |
232 | ||
233 | static const struct intel_device_info intel_ivybridge_d_info = { | |
234 | GEN7_FEATURES, | |
235 | .is_ivybridge = 1, | |
236 | GEN_DEFAULT_PIPEOFFSETS, | |
237 | IVB_CURSOR_OFFSETS, | |
238 | }; | |
239 | ||
240 | static const struct intel_device_info intel_ivybridge_m_info = { | |
241 | GEN7_FEATURES, | |
242 | .is_ivybridge = 1, | |
243 | .is_mobile = 1, | |
244 | GEN_DEFAULT_PIPEOFFSETS, | |
245 | IVB_CURSOR_OFFSETS, | |
246 | }; | |
247 | ||
248 | static const struct intel_device_info intel_ivybridge_q_info = { | |
249 | GEN7_FEATURES, | |
250 | .is_ivybridge = 1, | |
251 | .num_pipes = 0, /* legal, last one wins */ | |
252 | GEN_DEFAULT_PIPEOFFSETS, | |
253 | IVB_CURSOR_OFFSETS, | |
254 | }; | |
255 | ||
256 | static const struct intel_device_info intel_valleyview_m_info = { | |
257 | GEN7_FEATURES, | |
258 | .is_mobile = 1, | |
259 | .num_pipes = 2, | |
260 | .is_valleyview = 1, | |
261 | .display_mmio_offset = VLV_DISPLAY_BASE, | |
262 | .has_fbc = 0, /* legal, last one wins */ | |
263 | .has_llc = 0, /* legal, last one wins */ | |
264 | GEN_DEFAULT_PIPEOFFSETS, | |
265 | CURSOR_OFFSETS, | |
266 | }; | |
267 | ||
268 | static const struct intel_device_info intel_valleyview_d_info = { | |
269 | GEN7_FEATURES, | |
270 | .num_pipes = 2, | |
271 | .is_valleyview = 1, | |
272 | .display_mmio_offset = VLV_DISPLAY_BASE, | |
273 | .has_fbc = 0, /* legal, last one wins */ | |
274 | .has_llc = 0, /* legal, last one wins */ | |
275 | GEN_DEFAULT_PIPEOFFSETS, | |
276 | CURSOR_OFFSETS, | |
277 | }; | |
278 | ||
279 | static const struct intel_device_info intel_haswell_d_info = { | |
280 | GEN7_FEATURES, | |
281 | .is_haswell = 1, | |
282 | .has_ddi = 1, | |
283 | .has_fpga_dbg = 1, | |
284 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, | |
285 | GEN_DEFAULT_PIPEOFFSETS, | |
286 | IVB_CURSOR_OFFSETS, | |
287 | }; | |
288 | ||
289 | static const struct intel_device_info intel_haswell_m_info = { | |
290 | GEN7_FEATURES, | |
291 | .is_haswell = 1, | |
292 | .is_mobile = 1, | |
293 | .has_ddi = 1, | |
294 | .has_fpga_dbg = 1, | |
295 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, | |
296 | GEN_DEFAULT_PIPEOFFSETS, | |
297 | IVB_CURSOR_OFFSETS, | |
298 | }; | |
299 | ||
300 | static const struct intel_device_info intel_broadwell_d_info = { | |
301 | .gen = 8, .num_pipes = 3, | |
302 | .need_gfx_hws = 1, .has_hotplug = 1, | |
303 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, | |
304 | .has_llc = 1, | |
305 | .has_ddi = 1, | |
306 | .has_fpga_dbg = 1, | |
307 | .has_fbc = 1, | |
308 | GEN_DEFAULT_PIPEOFFSETS, | |
309 | IVB_CURSOR_OFFSETS, | |
310 | }; | |
311 | ||
312 | static const struct intel_device_info intel_broadwell_m_info = { | |
313 | .gen = 8, .is_mobile = 1, .num_pipes = 3, | |
314 | .need_gfx_hws = 1, .has_hotplug = 1, | |
315 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, | |
316 | .has_llc = 1, | |
317 | .has_ddi = 1, | |
318 | .has_fpga_dbg = 1, | |
319 | .has_fbc = 1, | |
320 | GEN_DEFAULT_PIPEOFFSETS, | |
321 | IVB_CURSOR_OFFSETS, | |
322 | }; | |
323 | ||
324 | static const struct intel_device_info intel_broadwell_gt3d_info = { | |
325 | .gen = 8, .num_pipes = 3, | |
326 | .need_gfx_hws = 1, .has_hotplug = 1, | |
327 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, | |
328 | .has_llc = 1, | |
329 | .has_ddi = 1, | |
330 | .has_fpga_dbg = 1, | |
331 | .has_fbc = 1, | |
332 | GEN_DEFAULT_PIPEOFFSETS, | |
333 | IVB_CURSOR_OFFSETS, | |
334 | }; | |
335 | ||
336 | static const struct intel_device_info intel_broadwell_gt3m_info = { | |
337 | .gen = 8, .is_mobile = 1, .num_pipes = 3, | |
338 | .need_gfx_hws = 1, .has_hotplug = 1, | |
339 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, | |
340 | .has_llc = 1, | |
341 | .has_ddi = 1, | |
342 | .has_fpga_dbg = 1, | |
343 | .has_fbc = 1, | |
344 | GEN_DEFAULT_PIPEOFFSETS, | |
345 | IVB_CURSOR_OFFSETS, | |
346 | }; | |
347 | ||
348 | static const struct intel_device_info intel_cherryview_info = { | |
349 | .gen = 8, .num_pipes = 3, | |
350 | .need_gfx_hws = 1, .has_hotplug = 1, | |
351 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, | |
352 | .is_valleyview = 1, | |
353 | .display_mmio_offset = VLV_DISPLAY_BASE, | |
354 | GEN_CHV_PIPEOFFSETS, | |
355 | CURSOR_OFFSETS, | |
356 | }; | |
357 | ||
358 | static const struct intel_device_info intel_skylake_info = { | |
359 | .is_skylake = 1, | |
360 | .gen = 9, .num_pipes = 3, | |
361 | .need_gfx_hws = 1, .has_hotplug = 1, | |
362 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, | |
363 | .has_llc = 1, | |
364 | .has_ddi = 1, | |
365 | .has_fpga_dbg = 1, | |
366 | .has_fbc = 1, | |
367 | GEN_DEFAULT_PIPEOFFSETS, | |
368 | IVB_CURSOR_OFFSETS, | |
369 | }; | |
370 | ||
371 | static const struct intel_device_info intel_skylake_gt3_info = { | |
372 | .is_skylake = 1, | |
373 | .gen = 9, .num_pipes = 3, | |
374 | .need_gfx_hws = 1, .has_hotplug = 1, | |
375 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, | |
376 | .has_llc = 1, | |
377 | .has_ddi = 1, | |
378 | .has_fpga_dbg = 1, | |
379 | .has_fbc = 1, | |
380 | GEN_DEFAULT_PIPEOFFSETS, | |
381 | IVB_CURSOR_OFFSETS, | |
382 | }; | |
383 | ||
384 | static const struct intel_device_info intel_broxton_info = { | |
385 | .is_preliminary = 1, | |
386 | .is_broxton = 1, | |
387 | .gen = 9, | |
388 | .need_gfx_hws = 1, .has_hotplug = 1, | |
389 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, | |
390 | .num_pipes = 3, | |
391 | .has_ddi = 1, | |
392 | .has_fpga_dbg = 1, | |
393 | .has_fbc = 1, | |
394 | GEN_DEFAULT_PIPEOFFSETS, | |
395 | IVB_CURSOR_OFFSETS, | |
396 | }; | |
397 | ||
398 | static const struct intel_device_info intel_kabylake_info = { | |
399 | .is_preliminary = 1, | |
400 | .is_kabylake = 1, | |
401 | .gen = 9, | |
402 | .num_pipes = 3, | |
403 | .need_gfx_hws = 1, .has_hotplug = 1, | |
404 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, | |
405 | .has_llc = 1, | |
406 | .has_ddi = 1, | |
407 | .has_fpga_dbg = 1, | |
408 | .has_fbc = 1, | |
409 | GEN_DEFAULT_PIPEOFFSETS, | |
410 | IVB_CURSOR_OFFSETS, | |
411 | }; | |
412 | ||
413 | static const struct intel_device_info intel_kabylake_gt3_info = { | |
414 | .is_preliminary = 1, | |
415 | .is_kabylake = 1, | |
416 | .gen = 9, | |
417 | .num_pipes = 3, | |
418 | .need_gfx_hws = 1, .has_hotplug = 1, | |
419 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, | |
420 | .has_llc = 1, | |
421 | .has_ddi = 1, | |
422 | .has_fpga_dbg = 1, | |
423 | .has_fbc = 1, | |
424 | GEN_DEFAULT_PIPEOFFSETS, | |
425 | IVB_CURSOR_OFFSETS, | |
426 | }; | |
427 | ||
428 | /* | |
429 | * Make sure any device matches here are from most specific to most | |
430 | * general. For example, since the Quanta match is based on the subsystem | |
431 | * and subvendor IDs, we need it to come before the more general IVB | |
432 | * PCI ID matches, otherwise we'll use the wrong info struct above. | |
433 | */ | |
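/*
 * Each INTEL_*_IDS() helper below is defined in the shared
 * <drm/i915_pciids.h> header and expands to one or more PCI device ID
 * entries whose driver_data points at the matching intel_device_info
 * above; i915_pci_probe() reads that pointer back from the matched
 * pci_device_id entry. For example, INTEL_I830_IDS(&intel_i830_info)
 * expands (roughly) to INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
 * i.e. a { 0x8086, <id>, ... } entry carrying the info pointer.
 */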
434 | static const struct pci_device_id pciidlist[] = { | |
435 | INTEL_I830_IDS(&intel_i830_info), | |
436 | INTEL_I845G_IDS(&intel_845g_info), | |
437 | INTEL_I85X_IDS(&intel_i85x_info), | |
438 | INTEL_I865G_IDS(&intel_i865g_info), | |
439 | INTEL_I915G_IDS(&intel_i915g_info), | |
440 | INTEL_I915GM_IDS(&intel_i915gm_info), | |
441 | INTEL_I945G_IDS(&intel_i945g_info), | |
442 | INTEL_I945GM_IDS(&intel_i945gm_info), | |
443 | INTEL_I965G_IDS(&intel_i965g_info), | |
444 | INTEL_G33_IDS(&intel_g33_info), | |
445 | INTEL_I965GM_IDS(&intel_i965gm_info), | |
446 | INTEL_GM45_IDS(&intel_gm45_info), | |
447 | INTEL_G45_IDS(&intel_g45_info), | |
448 | INTEL_PINEVIEW_IDS(&intel_pineview_info), | |
449 | INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), | |
450 | INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), | |
451 | INTEL_SNB_D_IDS(&intel_sandybridge_d_info), | |
452 | INTEL_SNB_M_IDS(&intel_sandybridge_m_info), | |
453 | INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ | |
454 | INTEL_IVB_M_IDS(&intel_ivybridge_m_info), | |
455 | INTEL_IVB_D_IDS(&intel_ivybridge_d_info), | |
456 | INTEL_HSW_D_IDS(&intel_haswell_d_info), | |
457 | INTEL_HSW_M_IDS(&intel_haswell_m_info), | |
458 | INTEL_VLV_M_IDS(&intel_valleyview_m_info), | |
459 | INTEL_VLV_D_IDS(&intel_valleyview_d_info), | |
460 | INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), | |
461 | INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), | |
462 | INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), | |
463 | INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), | |
464 | INTEL_CHV_IDS(&intel_cherryview_info), | |
465 | INTEL_SKL_GT1_IDS(&intel_skylake_info), | |
466 | INTEL_SKL_GT2_IDS(&intel_skylake_info), | |
467 | INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), | |
468 | INTEL_BXT_IDS(&intel_broxton_info), | |
469 | INTEL_KBL_GT1_IDS(&intel_kabylake_info), | |
470 | INTEL_KBL_GT2_IDS(&intel_kabylake_info), | |
471 | INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), | |
472 | INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), | |
473 | {0, 0, 0} | |
474 | }; | |
475 | ||
476 | MODULE_DEVICE_TABLE(pci, pciidlist); | |
477 | ||
478 | static enum intel_pch intel_virt_detect_pch(struct drm_device *dev) | |
479 | { | |
480 | enum intel_pch ret = PCH_NOP; | |
481 | ||
482 | /* | |
483 | * In a virtualized passthrough environment we can be in a | |
484 | * setup where the ISA bridge cannot be passed through. | |
485 | * In this case, a south bridge can be emulated and we have to | |
486 | * make an educated guess as to which PCH is really there. | |
487 | */ | |
488 | ||
489 | if (IS_GEN5(dev)) { | |
490 | ret = PCH_IBX; | |
491 | DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n"); | |
492 | } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) { | |
493 | ret = PCH_CPT; | |
494 | DRM_DEBUG_KMS("Assuming CouarPoint PCH\n"); | |
495 | } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { | |
496 | ret = PCH_LPT; | |
497 | DRM_DEBUG_KMS("Assuming LynxPoint PCH\n"); | |
498 | } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { | |
499 | ret = PCH_SPT; | |
500 | DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n"); | |
501 | } | |
502 | ||
503 | return ret; | |
504 | } | |
505 | ||
506 | void intel_detect_pch(struct drm_device *dev) | |
507 | { | |
508 | struct drm_i915_private *dev_priv = dev->dev_private; | |
509 | struct pci_dev *pch = NULL; | |
510 | ||
511 | /* In all current cases, a zero num_pipes is equivalent to the PCH_NOP setting | |
512 | * (which really amounts to a PCH but no South Display). | |
513 | */ | |
514 | if (INTEL_INFO(dev)->num_pipes == 0) { | |
515 | dev_priv->pch_type = PCH_NOP; | |
516 | return; | |
517 | } | |
518 | ||
519 | /* | |
520 | * The reason to probe the ISA bridge instead of Dev31:Fun0 is to | |
521 | * make graphics device passthrough work easy for the VMM, which then | |
522 | * only needs to expose the ISA bridge to let the driver know the real | |
523 | * hardware underneath. This is a requirement from the virtualization team. | |
524 | * | |
525 | * In some virtualized environments (e.g. XEN), there may be an irrelevant | |
526 | * ISA bridge in the system. To work reliably, we should scan through | |
527 | * all the ISA bridge devices and check for the first match, instead | |
528 | * of only checking the first one. | |
529 | */ | |
530 | while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) { | |
531 | if (pch->vendor == PCI_VENDOR_ID_INTEL) { | |
532 | unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK; | |
533 | dev_priv->pch_id = id; | |
534 | ||
535 | if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { | |
536 | dev_priv->pch_type = PCH_IBX; | |
537 | DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); | |
538 | WARN_ON(!IS_GEN5(dev)); | |
539 | } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { | |
540 | dev_priv->pch_type = PCH_CPT; | |
541 | DRM_DEBUG_KMS("Found CougarPoint PCH\n"); | |
542 | WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); | |
543 | } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { | |
544 | /* PantherPoint is CPT compatible */ | |
545 | dev_priv->pch_type = PCH_CPT; | |
546 | DRM_DEBUG_KMS("Found PantherPoint PCH\n"); | |
547 | WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); | |
548 | } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { | |
549 | dev_priv->pch_type = PCH_LPT; | |
550 | DRM_DEBUG_KMS("Found LynxPoint PCH\n"); | |
551 | WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); | |
552 | WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev)); | |
553 | } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { | |
554 | dev_priv->pch_type = PCH_LPT; | |
555 | DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); | |
556 | WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); | |
557 | WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev)); | |
558 | } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { | |
559 | dev_priv->pch_type = PCH_SPT; | |
560 | DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); | |
561 | WARN_ON(!IS_SKYLAKE(dev) && | |
562 | !IS_KABYLAKE(dev)); | |
563 | } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) { | |
564 | dev_priv->pch_type = PCH_SPT; | |
565 | DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); | |
566 | WARN_ON(!IS_SKYLAKE(dev) && | |
567 | !IS_KABYLAKE(dev)); | |
568 | } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || | |
569 | (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE)) { | |
570 | dev_priv->pch_type = intel_virt_detect_pch(dev); | |
571 | } else | |
572 | continue; | |
573 | ||
574 | break; | |
575 | } | |
576 | } | |
577 | if (!pch) | |
578 | DRM_DEBUG_KMS("No PCH found.\n"); | |
579 | ||
580 | pci_dev_put(pch); | |
581 | } | |
582 | ||
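/*
 * Semaphore policy, in order of precedence: not supported before gen6,
 * honoured from the i915.semaphores modparam when it is set (>= 0),
 * disabled together with execlists and on gen8, and disabled on SNB when
 * the IOMMU has graphics remapping enabled; otherwise enabled.
 */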
583 | bool i915_semaphore_is_enabled(struct drm_device *dev) | |
584 | { | |
585 | if (INTEL_INFO(dev)->gen < 6) | |
586 | return false; | |
587 | ||
588 | if (i915.semaphores >= 0) | |
589 | return i915.semaphores; | |
590 | ||
591 | /* TODO: make semaphores and Execlists play nicely together */ | |
592 | if (i915.enable_execlists) | |
593 | return false; | |
594 | ||
595 | /* Until we get further testing... */ | |
596 | if (IS_GEN8(dev)) | |
597 | return false; | |
598 | ||
599 | #ifdef CONFIG_INTEL_IOMMU | |
600 | /* Enable semaphores on SNB when IO remapping is off */ | |
601 | if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) | |
602 | return false; | |
603 | #endif | |
604 | ||
605 | return true; | |
606 | } | |
607 | ||
608 | static void intel_suspend_encoders(struct drm_i915_private *dev_priv) | |
609 | { | |
610 | struct drm_device *dev = dev_priv->dev; | |
611 | struct drm_encoder *encoder; | |
612 | ||
613 | drm_modeset_lock_all(dev); | |
614 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | |
615 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); | |
616 | ||
617 | if (intel_encoder->suspend) | |
618 | intel_encoder->suspend(intel_encoder); | |
619 | } | |
620 | drm_modeset_unlock_all(dev); | |
621 | } | |
622 | ||
623 | static int intel_suspend_complete(struct drm_i915_private *dev_priv); | |
624 | static int vlv_resume_prepare(struct drm_i915_private *dev_priv, | |
625 | bool rpm_resume); | |
626 | static int bxt_resume_prepare(struct drm_i915_private *dev_priv); | |
627 | ||
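/*
 * True when the target sleep state is suspend-to-idle (S0ix) rather than
 * S3; used below to decide whether the DMC/CSR firmware keeps handling
 * power and which power state is reported to the ACPI opregion.
 */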
628 | static bool suspend_to_idle(struct drm_i915_private *dev_priv) | |
629 | { | |
630 | #if IS_ENABLED(CONFIG_ACPI_SLEEP) | |
631 | if (acpi_target_system_state() < ACPI_STATE_S3) | |
632 | return true; | |
633 | #endif | |
634 | return false; | |
635 | } | |
636 | ||
637 | static int i915_drm_suspend(struct drm_device *dev) | |
638 | { | |
639 | struct drm_i915_private *dev_priv = dev->dev_private; | |
640 | pci_power_t opregion_target_state; | |
641 | int error; | |
642 | ||
643 | /* ignore lid events during suspend */ | |
644 | mutex_lock(&dev_priv->modeset_restore_lock); | |
645 | dev_priv->modeset_restore = MODESET_SUSPENDED; | |
646 | mutex_unlock(&dev_priv->modeset_restore_lock); | |
647 | ||
648 | /* We do a lot of poking in a lot of registers, make sure they work | |
649 | * properly. */ | |
650 | intel_display_set_init_power(dev_priv, true); | |
651 | ||
652 | drm_kms_helper_poll_disable(dev); | |
653 | ||
654 | pci_save_state(dev->pdev); | |
655 | ||
656 | error = i915_gem_suspend(dev); | |
657 | if (error) { | |
658 | dev_err(&dev->pdev->dev, | |
659 | "GEM idle failed, resume might fail\n"); | |
660 | return error; | |
661 | } | |
662 | ||
663 | intel_guc_suspend(dev); | |
664 | ||
665 | intel_suspend_gt_powersave(dev); | |
666 | ||
667 | /* | |
668 | * Disable CRTCs directly since we want to preserve sw state | |
669 | * for _thaw. Also, power gate the CRTC power wells. | |
670 | */ | |
671 | drm_modeset_lock_all(dev); | |
672 | intel_display_suspend(dev); | |
673 | drm_modeset_unlock_all(dev); | |
674 | ||
675 | intel_dp_mst_suspend(dev); | |
676 | ||
677 | intel_runtime_pm_disable_interrupts(dev_priv); | |
678 | intel_hpd_cancel_work(dev_priv); | |
679 | ||
680 | intel_suspend_encoders(dev_priv); | |
681 | ||
682 | intel_suspend_hw(dev); | |
683 | ||
684 | i915_gem_suspend_gtt_mappings(dev); | |
685 | ||
686 | i915_save_state(dev); | |
687 | ||
688 | opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; | |
689 | intel_opregion_notify_adapter(dev, opregion_target_state); | |
690 | ||
691 | intel_uncore_forcewake_reset(dev, false); | |
692 | intel_opregion_fini(dev); | |
693 | ||
694 | intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); | |
695 | ||
696 | dev_priv->suspend_count++; | |
697 | ||
698 | intel_display_set_init_power(dev_priv, false); | |
699 | ||
700 | if (HAS_CSR(dev_priv)) | |
701 | flush_work(&dev_priv->csr.work); | |
702 | ||
703 | return 0; | |
704 | } | |
705 | ||
706 | static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) | |
707 | { | |
708 | struct drm_i915_private *dev_priv = drm_dev->dev_private; | |
709 | bool fw_csr; | |
710 | int ret; | |
711 | ||
712 | fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; | |
713 | /* | |
714 | * In case of firmware assisted context save/restore don't manually | |
715 | * deinit the power domains. This also means the CSR/DMC firmware will | |
716 | * stay active, it will power down any HW resources as required and | |
717 | * also enable deeper system power states that would be blocked if the | |
718 | * firmware was inactive. | |
719 | */ | |
720 | if (!fw_csr) | |
721 | intel_power_domains_suspend(dev_priv); | |
722 | ||
723 | ret = intel_suspend_complete(dev_priv); | |
724 | ||
725 | if (ret) { | |
726 | DRM_ERROR("Suspend complete failed: %d\n", ret); | |
727 | if (!fw_csr) | |
728 | intel_power_domains_init_hw(dev_priv, true); | |
729 | ||
730 | return ret; | |
731 | } | |
732 | ||
733 | pci_disable_device(drm_dev->pdev); | |
734 | /* | |
735 | * During hibernation on some platforms the BIOS may try to access | |
736 | * the device even though it's already in D3 and hang the machine. So | |
737 | * leave the device in D0 on those platforms and hope the BIOS will | |
738 | * power down the device properly. The issue was seen on multiple old | |
739 | * GENs with different BIOS vendors, so having an explicit blacklist | |
740 | * is impractical; apply the workaround on everything pre GEN6. The | |
741 | * platforms where the issue was seen: | |
742 | * Lenovo Thinkpad X301, X61s, X60, T60, X41 | |
743 | * Fujitsu FSC S7110 | |
744 | * Acer Aspire 1830T | |
745 | */ | |
746 | if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6)) | |
747 | pci_set_power_state(drm_dev->pdev, PCI_D3hot); | |
748 | ||
749 | dev_priv->suspended_to_idle = suspend_to_idle(dev_priv); | |
750 | ||
751 | return 0; | |
752 | } | |
753 | ||
754 | int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) | |
755 | { | |
756 | int error; | |
757 | ||
758 | if (!dev || !dev->dev_private) { | |
759 | DRM_ERROR("dev: %p\n", dev); | |
760 | DRM_ERROR("DRM not initialized, aborting suspend.\n"); | |
761 | return -ENODEV; | |
762 | } | |
763 | ||
764 | if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND && | |
765 | state.event != PM_EVENT_FREEZE)) | |
766 | return -EINVAL; | |
767 | ||
768 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | |
769 | return 0; | |
770 | ||
771 | error = i915_drm_suspend(dev); | |
772 | if (error) | |
773 | return error; | |
774 | ||
775 | return i915_drm_suspend_late(dev, false); | |
776 | } | |
777 | ||
778 | static int i915_drm_resume(struct drm_device *dev) | |
779 | { | |
780 | struct drm_i915_private *dev_priv = dev->dev_private; | |
781 | ||
782 | mutex_lock(&dev->struct_mutex); | |
783 | i915_gem_restore_gtt_mappings(dev); | |
784 | mutex_unlock(&dev->struct_mutex); | |
785 | ||
786 | i915_restore_state(dev); | |
787 | intel_opregion_setup(dev); | |
788 | ||
789 | intel_init_pch_refclk(dev); | |
790 | drm_mode_config_reset(dev); | |
791 | ||
792 | /* | |
793 | * Interrupts have to be enabled before any batches are run. If not the | |
794 | * GPU will hang. i915_gem_init_hw() will initiate batches to | |
795 | * update/restore the context. | |
796 | * | |
797 | * Modeset enabling in intel_modeset_init_hw() also needs working | |
798 | * interrupts. | |
799 | */ | |
800 | intel_runtime_pm_enable_interrupts(dev_priv); | |
801 | ||
802 | mutex_lock(&dev->struct_mutex); | |
803 | if (i915_gem_init_hw(dev)) { | |
804 | DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); | |
805 | atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter); | |
806 | } | |
807 | mutex_unlock(&dev->struct_mutex); | |
808 | ||
809 | intel_guc_resume(dev); | |
810 | ||
811 | intel_modeset_init_hw(dev); | |
812 | ||
813 | spin_lock_irq(&dev_priv->irq_lock); | |
814 | if (dev_priv->display.hpd_irq_setup) | |
815 | dev_priv->display.hpd_irq_setup(dev); | |
816 | spin_unlock_irq(&dev_priv->irq_lock); | |
817 | ||
818 | drm_modeset_lock_all(dev); | |
819 | intel_display_resume(dev); | |
820 | drm_modeset_unlock_all(dev); | |
821 | ||
822 | intel_dp_mst_resume(dev); | |
823 | ||
824 | /* | |
825 | * ... but also need to make sure that hotplug processing | |
826 | * doesn't cause havoc. Like in the driver load code we don't | |
827 | * bother with the tiny race here where we might lose hotplug | |
828 | * notifications. | |
829 | */ | |
830 | intel_hpd_init(dev_priv); | |
831 | /* Config may have changed between suspend and resume */ | |
832 | drm_helper_hpd_irq_event(dev); | |
833 | ||
834 | intel_opregion_init(dev); | |
835 | ||
836 | intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); | |
837 | ||
838 | mutex_lock(&dev_priv->modeset_restore_lock); | |
839 | dev_priv->modeset_restore = MODESET_DONE; | |
840 | mutex_unlock(&dev_priv->modeset_restore_lock); | |
841 | ||
842 | intel_opregion_notify_adapter(dev, PCI_D0); | |
843 | ||
844 | drm_kms_helper_poll_enable(dev); | |
845 | ||
846 | return 0; | |
847 | } | |
848 | ||
849 | static int i915_drm_resume_early(struct drm_device *dev) | |
850 | { | |
851 | struct drm_i915_private *dev_priv = dev->dev_private; | |
852 | int ret = 0; | |
853 | ||
854 | /* | |
855 | * We have a resume ordering issue with the snd-hda driver also | |
856 | * requiring our device to be powered up. Due to the lack of a | |
857 | * parent/child relationship we currently solve this with an early | |
858 | * resume hook. | |
859 | * | |
860 | * FIXME: This should be solved with a special hdmi sink device or | |
861 | * similar so that power domains can be employed. | |
862 | */ | |
863 | if (pci_enable_device(dev->pdev)) { | |
864 | ret = -EIO; | |
865 | goto out; | |
866 | } | |
867 | ||
868 | pci_set_master(dev->pdev); | |
869 | ||
870 | if (IS_VALLEYVIEW(dev_priv)) | |
871 | ret = vlv_resume_prepare(dev_priv, false); | |
872 | if (ret) | |
873 | DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", | |
874 | ret); | |
875 | ||
876 | intel_uncore_early_sanitize(dev, true); | |
877 | ||
878 | if (IS_BROXTON(dev)) | |
879 | ret = bxt_resume_prepare(dev_priv); | |
880 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | |
881 | hsw_disable_pc8(dev_priv); | |
882 | ||
883 | intel_uncore_sanitize(dev); | |
884 | ||
885 | if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) | |
886 | intel_power_domains_init_hw(dev_priv, true); | |
887 | ||
888 | out: | |
889 | dev_priv->suspended_to_idle = false; | |
890 | ||
891 | return ret; | |
892 | } | |
893 | ||
894 | int i915_resume_switcheroo(struct drm_device *dev) | |
895 | { | |
896 | int ret; | |
897 | ||
898 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | |
899 | return 0; | |
900 | ||
901 | ret = i915_drm_resume_early(dev); | |
902 | if (ret) | |
903 | return ret; | |
904 | ||
905 | return i915_drm_resume(dev); | |
906 | } | |
907 | ||
908 | /** | |
909 | * i915_reset - reset chip after a hang | |
910 | * @dev: drm device to reset | |
911 | * | |
912 | * Reset the chip. Useful if a hang is detected. Returns zero on successful | |
913 | * reset or otherwise an error code. | |
914 | * | |
915 | * Procedure is fairly simple: | |
916 | * - reset the chip using the reset reg | |
917 | * - re-init context state | |
918 | * - re-init hardware status page | |
919 | * - re-init ring buffer | |
920 | * - re-init interrupt state | |
921 | * - re-init display | |
922 | */ | |
923 | int i915_reset(struct drm_device *dev) | |
924 | { | |
925 | struct drm_i915_private *dev_priv = dev->dev_private; | |
926 | bool simulated; | |
927 | int ret; | |
928 | ||
929 | intel_reset_gt_powersave(dev); | |
930 | ||
931 | mutex_lock(&dev->struct_mutex); | |
932 | ||
933 | i915_gem_reset(dev); | |
934 | ||
935 | simulated = dev_priv->gpu_error.stop_rings != 0; | |
936 | ||
937 | ret = intel_gpu_reset(dev); | |
938 | ||
939 | /* Also reset the gpu hangman. */ | |
940 | if (simulated) { | |
941 | DRM_INFO("Simulated gpu hang, resetting stop_rings\n"); | |
942 | dev_priv->gpu_error.stop_rings = 0; | |
943 | if (ret == -ENODEV) { | |
944 | DRM_INFO("Reset not implemented, but ignoring " | |
945 | "error for simulated gpu hangs\n"); | |
946 | ret = 0; | |
947 | } | |
948 | } | |
949 | ||
950 | if (i915_stop_ring_allow_warn(dev_priv)) | |
951 | pr_notice("drm/i915: Resetting chip after gpu hang\n"); | |
952 | ||
953 | if (ret) { | |
954 | DRM_ERROR("Failed to reset chip: %i\n", ret); | |
955 | mutex_unlock(&dev->struct_mutex); | |
956 | return ret; | |
957 | } | |
958 | ||
959 | intel_overlay_reset(dev_priv); | |
960 | ||
961 | /* Ok, now get things going again... */ | |
962 | ||
963 | /* | |
964 | * Everything depends on having the GTT running, so we need to start | |
965 | * there. Fortunately we don't need to do this unless we reset the | |
966 | * chip at a PCI level. | |
967 | * | |
968 | * Next we need to restore the context, but we don't use those | |
969 | * yet either... | |
970 | * | |
971 | * Ring buffer needs to be re-initialized in the KMS case, or if X | |
972 | * was running at the time of the reset (i.e. we weren't VT | |
973 | * switched away). | |
974 | */ | |
975 | ||
976 | /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */ | |
977 | dev_priv->gpu_error.reload_in_reset = true; | |
978 | ||
979 | ret = i915_gem_init_hw(dev); | |
980 | ||
981 | dev_priv->gpu_error.reload_in_reset = false; | |
982 | ||
983 | mutex_unlock(&dev->struct_mutex); | |
984 | if (ret) { | |
985 | DRM_ERROR("Failed hw init on reset %d\n", ret); | |
986 | return ret; | |
987 | } | |
988 | ||
989 | /* | |
990 | * rps/rc6 re-init is necessary to restore state lost after the | |
991 | * reset and the re-install of gt irqs. Skip for ironlake per | |
992 | * previous concerns that it doesn't respond well to some forms | |
993 | * of re-init after reset. | |
994 | */ | |
995 | if (INTEL_INFO(dev)->gen > 5) | |
996 | intel_enable_gt_powersave(dev); | |
997 | ||
998 | return 0; | |
999 | } | |
1000 | ||
1001 | static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |
1002 | { | |
1003 | struct intel_device_info *intel_info = | |
1004 | (struct intel_device_info *) ent->driver_data; | |
1005 | ||
1006 | if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) { | |
1007 | DRM_INFO("This hardware requires preliminary hardware support.\n" | |
1008 | "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n"); | |
1009 | return -ENODEV; | |
1010 | } | |
1011 | ||
1012 | /* Only bind to function 0 of the device. Early generations | |
1013 | * used function 1 as a placeholder for multi-head. This causes | |
1014 | * us confusion instead, especially on the systems where both | |
1015 | * functions have the same PCI-ID! | |
1016 | */ | |
1017 | if (PCI_FUNC(pdev->devfn)) | |
1018 | return -ENODEV; | |
1019 | ||
1020 | return drm_get_pci_dev(pdev, ent, &driver); | |
1021 | } | |
1022 | ||
1023 | static void | |
1024 | i915_pci_remove(struct pci_dev *pdev) | |
1025 | { | |
1026 | struct drm_device *dev = pci_get_drvdata(pdev); | |
1027 | ||
1028 | drm_put_dev(dev); | |
1029 | } | |
1030 | ||
1031 | static int i915_pm_suspend(struct device *dev) | |
1032 | { | |
1033 | struct pci_dev *pdev = to_pci_dev(dev); | |
1034 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | |
1035 | ||
1036 | if (!drm_dev || !drm_dev->dev_private) { | |
1037 | dev_err(dev, "DRM not initialized, aborting suspend.\n"); | |
1038 | return -ENODEV; | |
1039 | } | |
1040 | ||
1041 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) | |
1042 | return 0; | |
1043 | ||
1044 | return i915_drm_suspend(drm_dev); | |
1045 | } | |
1046 | ||
1047 | static int i915_pm_suspend_late(struct device *dev) | |
1048 | { | |
1049 | struct drm_device *drm_dev = dev_to_i915(dev)->dev; | |
1050 | ||
1051 | /* | |
1052 | * We have a suspend ordering issue with the snd-hda driver also | |
1053 | * requiring our device to be powered up. Due to the lack of a | |
1054 | * parent/child relationship we currently solve this with a late | |
1055 | * suspend hook. | |
1056 | * | |
1057 | * FIXME: This should be solved with a special hdmi sink device or | |
1058 | * similar so that power domains can be employed. | |
1059 | */ | |
1060 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) | |
1061 | return 0; | |
1062 | ||
1063 | return i915_drm_suspend_late(drm_dev, false); | |
1064 | } | |
1065 | ||
1066 | static int i915_pm_poweroff_late(struct device *dev) | |
1067 | { | |
1068 | struct drm_device *drm_dev = dev_to_i915(dev)->dev; | |
1069 | ||
1070 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) | |
1071 | return 0; | |
1072 | ||
1073 | return i915_drm_suspend_late(drm_dev, true); | |
1074 | } | |
1075 | ||
1076 | static int i915_pm_resume_early(struct device *dev) | |
1077 | { | |
1078 | struct drm_device *drm_dev = dev_to_i915(dev)->dev; | |
1079 | ||
1080 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) | |
1081 | return 0; | |
1082 | ||
1083 | return i915_drm_resume_early(drm_dev); | |
1084 | } | |
1085 | ||
1086 | static int i915_pm_resume(struct device *dev) | |
1087 | { | |
1088 | struct drm_device *drm_dev = dev_to_i915(dev)->dev; | |
1089 | ||
1090 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) | |
1091 | return 0; | |
1092 | ||
1093 | return i915_drm_resume(drm_dev); | |
1094 | } | |
1095 | ||
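/*
 * Platform-specific suspend/resume halves: the *_suspend_complete()
 * helpers below are dispatched per-platform from intel_suspend_complete(),
 * and the matching *_resume_prepare() helpers from the early-resume paths.
 */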
1096 | static int hsw_suspend_complete(struct drm_i915_private *dev_priv) | |
1097 | { | |
1098 | hsw_enable_pc8(dev_priv); | |
1099 | ||
1100 | return 0; | |
1101 | } | |
1102 | ||
1103 | static int bxt_suspend_complete(struct drm_i915_private *dev_priv) | |
1104 | { | |
1105 | struct drm_device *dev = dev_priv->dev; | |
1106 | ||
1107 | /* TODO: when DC5 support is added disable DC5 here. */ | |
1108 | ||
1109 | broxton_ddi_phy_uninit(dev); | |
1110 | broxton_uninit_cdclk(dev); | |
1111 | bxt_enable_dc9(dev_priv); | |
1112 | ||
1113 | return 0; | |
1114 | } | |
1115 | ||
1116 | static int bxt_resume_prepare(struct drm_i915_private *dev_priv) | |
1117 | { | |
1118 | struct drm_device *dev = dev_priv->dev; | |
1119 | ||
1120 | /* TODO: when CSR FW support is added make sure the FW is loaded */ | |
1121 | ||
1122 | bxt_disable_dc9(dev_priv); | |
1123 | ||
1124 | /* | |
1125 | * TODO: when DC5 support is added enable DC5 here if the CSR FW | |
1126 | * is available. | |
1127 | */ | |
1128 | broxton_init_cdclk(dev); | |
1129 | broxton_ddi_phy_init(dev); | |
1130 | intel_prepare_ddi(dev); | |
1131 | ||
1132 | return 0; | |
1133 | } | |
1134 | ||
1135 | /* | |
1136 | * Save all Gunit registers that may be lost after a D3 and a subsequent | |
1137 | * S0i[R123] transition. The list of registers needing a save/restore is | |
1138 | * defined in the VLV2_S0IXRegs document. This document marks all Gunit | |
1139 | * registers in the following way: | |
1140 | * - Driver: saved/restored by the driver | |
1141 | * - Punit : saved/restored by the Punit firmware | |
1142 | * - No, w/o marking: no need to save/restore, since the register is R/O or | |
1143 | * used internally by the HW in a way that doesn't depend on | |
1144 | * keeping the content across a suspend/resume. | |
1145 | * - Debug : used for debugging | |
1146 | * | |
1147 | * We save/restore all registers marked with 'Driver', with the following | |
1148 | * exceptions: | |
1149 | * - Registers out of use, including also registers marked with 'Debug'. | |
1150 | * These have no effect on the driver's operation, so we don't save/restore | |
1151 | * them to reduce the overhead. | |
1152 | * - Registers that are fully setup by an initialization function called from | |
1153 | * the resume path. For example many clock gating and RPS/RC6 registers. | |
1154 | * - Registers that provide the right functionality with their reset defaults. | |
1155 | * | |
1156 | * TODO: Except for registers that based on the above 3 criteria can be safely | |
1157 | * ignored, we save/restore all others, practically treating the HW context as | |
1158 | * a black-box for the driver. Further investigation is needed to reduce the | |
1159 | * saved/restored registers even further, by following the same 3 criteria. | |
1160 | */ | |
1161 | static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) | |
1162 | { | |
1163 | struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; | |
1164 | int i; | |
1165 | ||
1166 | /* GAM 0x4000-0x4770 */ | |
1167 | s->wr_watermark = I915_READ(GEN7_WR_WATERMARK); | |
1168 | s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL); | |
1169 | s->arb_mode = I915_READ(ARB_MODE); | |
1170 | s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0); | |
1171 | s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1); | |
1172 | ||
1173 | for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) | |
1174 | s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i)); | |
1175 | ||
1176 | s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); | |
1177 | s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT); | |
1178 | ||
1179 | s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7); | |
1180 | s->ecochk = I915_READ(GAM_ECOCHK); | |
1181 | s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7); | |
1182 | s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7); | |
1183 | ||
1184 | s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR); | |
1185 | ||
1186 | /* MBC 0x9024-0x91D0, 0x8500 */ | |
1187 | s->g3dctl = I915_READ(VLV_G3DCTL); | |
1188 | s->gsckgctl = I915_READ(VLV_GSCKGCTL); | |
1189 | s->mbctl = I915_READ(GEN6_MBCTL); | |
1190 | ||
1191 | /* GCP 0x9400-0x9424, 0x8100-0x810C */ | |
1192 | s->ucgctl1 = I915_READ(GEN6_UCGCTL1); | |
1193 | s->ucgctl3 = I915_READ(GEN6_UCGCTL3); | |
1194 | s->rcgctl1 = I915_READ(GEN6_RCGCTL1); | |
1195 | s->rcgctl2 = I915_READ(GEN6_RCGCTL2); | |
1196 | s->rstctl = I915_READ(GEN6_RSTCTL); | |
1197 | s->misccpctl = I915_READ(GEN7_MISCCPCTL); | |
1198 | ||
1199 | /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ | |
1200 | s->gfxpause = I915_READ(GEN6_GFXPAUSE); | |
1201 | s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC); | |
1202 | s->rpdeuc = I915_READ(GEN6_RPDEUC); | |
1203 | s->ecobus = I915_READ(ECOBUS); | |
1204 | s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL); | |
1205 | s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT); | |
1206 | s->rp_deucsw = I915_READ(GEN6_RPDEUCSW); | |
1207 | s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR); | |
1208 | s->rcedata = I915_READ(VLV_RCEDATA); | |
1209 | s->spare2gh = I915_READ(VLV_SPAREG2H); | |
1210 | ||
1211 | /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ | |
1212 | s->gt_imr = I915_READ(GTIMR); | |
1213 | s->gt_ier = I915_READ(GTIER); | |
1214 | s->pm_imr = I915_READ(GEN6_PMIMR); | |
1215 | s->pm_ier = I915_READ(GEN6_PMIER); | |
1216 | ||
1217 | for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) | |
1218 | s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i)); | |
1219 | ||
1220 | /* GT SA CZ domain, 0x100000-0x138124 */ | |
1221 | s->tilectl = I915_READ(TILECTL); | |
1222 | s->gt_fifoctl = I915_READ(GTFIFOCTL); | |
1223 | s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL); | |
1224 | s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG); | |
1225 | s->pmwgicz = I915_READ(VLV_PMWGICZ); | |
1226 | ||
1227 | /* Gunit-Display CZ domain, 0x182028-0x1821CF */ | |
1228 | s->gu_ctl0 = I915_READ(VLV_GU_CTL0); | |
1229 | s->gu_ctl1 = I915_READ(VLV_GU_CTL1); | |
1230 | s->pcbr = I915_READ(VLV_PCBR); | |
1231 | s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2); | |
1232 | ||
1233 | /* | |
1234 | * Not saving any of: | |
1235 | * DFT, 0x9800-0x9EC0 | |
1236 | * SARB, 0xB000-0xB1FC | |
1237 | * GAC, 0x5208-0x524C, 0x14000-0x14C000 | |
1238 | * PCI CFG | |
1239 | */ | |
1240 | } | |
1241 | ||
1242 | static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) | |
1243 | { | |
1244 | struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; | |
1245 | u32 val; | |
1246 | int i; | |
1247 | ||
1248 | /* GAM 0x4000-0x4770 */ | |
1249 | I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark); | |
1250 | I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl); | |
1251 | I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16)); | |
1252 | I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0); | |
1253 | I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1); | |
1254 | ||
1255 | for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) | |
1256 | I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]); | |
1257 | ||
1258 | I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); | |
1259 | I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count); | |
1260 | ||
1261 | I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp); | |
1262 | I915_WRITE(GAM_ECOCHK, s->ecochk); | |
1263 | I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp); | |
1264 | I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp); | |
1265 | ||
1266 | I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr); | |
1267 | ||
1268 | /* MBC 0x9024-0x91D0, 0x8500 */ | |
1269 | I915_WRITE(VLV_G3DCTL, s->g3dctl); | |
1270 | I915_WRITE(VLV_GSCKGCTL, s->gsckgctl); | |
1271 | I915_WRITE(GEN6_MBCTL, s->mbctl); | |
1272 | ||
1273 | /* GCP 0x9400-0x9424, 0x8100-0x810C */ | |
1274 | I915_WRITE(GEN6_UCGCTL1, s->ucgctl1); | |
1275 | I915_WRITE(GEN6_UCGCTL3, s->ucgctl3); | |
1276 | I915_WRITE(GEN6_RCGCTL1, s->rcgctl1); | |
1277 | I915_WRITE(GEN6_RCGCTL2, s->rcgctl2); | |
1278 | I915_WRITE(GEN6_RSTCTL, s->rstctl); | |
1279 | I915_WRITE(GEN7_MISCCPCTL, s->misccpctl); | |
1280 | ||
1281 | /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ | |
1282 | I915_WRITE(GEN6_GFXPAUSE, s->gfxpause); | |
1283 | I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc); | |
1284 | I915_WRITE(GEN6_RPDEUC, s->rpdeuc); | |
1285 | I915_WRITE(ECOBUS, s->ecobus); | |
1286 | I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl); | |
1287 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout); | |
1288 | I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw); | |
1289 | I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr); | |
1290 | I915_WRITE(VLV_RCEDATA, s->rcedata); | |
1291 | I915_WRITE(VLV_SPAREG2H, s->spare2gh); | |
1292 | ||
1293 | /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ | |
1294 | I915_WRITE(GTIMR, s->gt_imr); | |
1295 | I915_WRITE(GTIER, s->gt_ier); | |
1296 | I915_WRITE(GEN6_PMIMR, s->pm_imr); | |
1297 | I915_WRITE(GEN6_PMIER, s->pm_ier); | |
1298 | ||
1299 | for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) | |
1300 | I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]); | |
1301 | ||
1302 | /* GT SA CZ domain, 0x100000-0x138124 */ | |
1303 | I915_WRITE(TILECTL, s->tilectl); | |
1304 | I915_WRITE(GTFIFOCTL, s->gt_fifoctl); | |
1305 | /* | |
1306 | * Preserve the GT allow wake and GFX force clock bit; they are not | |
1307 | * restored here, as they are used to control the s0ix suspend/resume | |
1308 | * sequence by the caller. | |
1309 | */ | |
1310 | val = I915_READ(VLV_GTLC_WAKE_CTRL); | |
1311 | val &= VLV_GTLC_ALLOWWAKEREQ; | |
1312 | val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ; | |
1313 | I915_WRITE(VLV_GTLC_WAKE_CTRL, val); | |
1314 | ||
1315 | val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); | |
1316 | val &= VLV_GFX_CLK_FORCE_ON_BIT; | |
1317 | val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT; | |
1318 | I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); | |
1319 | ||
1320 | I915_WRITE(VLV_PMWGICZ, s->pmwgicz); | |
1321 | ||
1322 | /* Gunit-Display CZ domain, 0x182028-0x1821CF */ | |
1323 | I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); | |
1324 | I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); | |
1325 | I915_WRITE(VLV_PCBR, s->pcbr); | |
1326 | I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); | |
1327 | } | |
1328 | ||
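/*
 * Force the Gunit graphics clock on or off. The s0ix suspend/resume
 * paths below force it on around the Gunit register save/restore and
 * release it again afterwards.
 */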
1329 | int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) | |
1330 | { | |
1331 | u32 val; | |
1332 | int err; | |
1333 | ||
1334 | #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT) | |
1335 | ||
1336 | val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); | |
1337 | val &= ~VLV_GFX_CLK_FORCE_ON_BIT; | |
1338 | if (force_on) | |
1339 | val |= VLV_GFX_CLK_FORCE_ON_BIT; | |
1340 | I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); | |
1341 | ||
1342 | if (!force_on) | |
1343 | return 0; | |
1344 | ||
1345 | err = wait_for(COND, 20); | |
1346 | if (err) | |
1347 | DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n", | |
1348 | I915_READ(VLV_GTLC_SURVIVABILITY_REG)); | |
1349 | ||
1350 | return err; | |
1351 | #undef COND | |
1352 | } | |
1353 | ||
1354 | static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) | |
1355 | { | |
1356 | u32 val; | |
1357 | int err = 0; | |
1358 | ||
1359 | val = I915_READ(VLV_GTLC_WAKE_CTRL); | |
1360 | val &= ~VLV_GTLC_ALLOWWAKEREQ; | |
1361 | if (allow) | |
1362 | val |= VLV_GTLC_ALLOWWAKEREQ; | |
1363 | I915_WRITE(VLV_GTLC_WAKE_CTRL, val); | |
1364 | POSTING_READ(VLV_GTLC_WAKE_CTRL); | |
1365 | ||
1366 | #define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \ | |
1367 | allow) | |
1368 | err = wait_for(COND, 1); | |
1369 | if (err) | |
1370 | DRM_ERROR("timeout disabling GT waking\n"); | |
1371 | return err; | |
1372 | #undef COND | |
1373 | } | |
1374 | ||
1375 | static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, | |
1376 | bool wait_for_on) | |
1377 | { | |
1378 | u32 mask; | |
1379 | u32 val; | |
1380 | int err; | |
1381 | ||
1382 | mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; | |
1383 | val = wait_for_on ? mask : 0; | |
1384 | #define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val) | |
1385 | if (COND) | |
1386 | return 0; | |
1387 | ||
1388 | DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n", | |
1389 | wait_for_on ? "on" : "off", | |
1390 | I915_READ(VLV_GTLC_PW_STATUS)); | |
1391 | ||
1392 | /* | |
1393 | * RC6 transitioning can be delayed up to 2 msec (see | |
1394 | * valleyview_enable_rps), use 3 msec for safety. | |
1395 | */ | |
1396 | err = wait_for(COND, 3); | |
1397 | if (err) | |
1398 | DRM_ERROR("timeout waiting for GT wells to go %s\n", | |
1399 | wait_for_on ? "on" : "off"); | |
1400 | ||
1401 | return err; | |
1402 | #undef COND | |
1403 | } | |
1404 | ||
1405 | static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) | |
1406 | { | |
1407 | if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR)) | |
1408 | return; | |
1409 | ||
1410 | DRM_ERROR("GT register access while GT waking disabled\n"); | |
1411 | I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); | |
1412 | } | |
1413 | ||
1414 | static int vlv_suspend_complete(struct drm_i915_private *dev_priv) | |
1415 | { | |
1416 | u32 mask; | |
1417 | int err; | |
1418 | ||
1419 | /* | |
1420 | * Bspec defines the following GT well-on flags as debug only, so | |
1421 | * don't treat them as hard failures. | |
1422 | */ | |
1423 | (void)vlv_wait_for_gt_wells(dev_priv, false); | |
1424 | ||
1425 | mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS; | |
1426 | WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask); | |
1427 | ||
1428 | vlv_check_no_gt_access(dev_priv); | |
1429 | ||
1430 | err = vlv_force_gfx_clock(dev_priv, true); | |
1431 | if (err) | |
1432 | goto err1; | |
1433 | ||
1434 | err = vlv_allow_gt_wake(dev_priv, false); | |
1435 | if (err) | |
1436 | goto err2; | |
1437 | ||
1438 | if (!IS_CHERRYVIEW(dev_priv->dev)) | |
1439 | vlv_save_gunit_s0ix_state(dev_priv); | |
1440 | ||
1441 | err = vlv_force_gfx_clock(dev_priv, false); | |
1442 | if (err) | |
1443 | goto err2; | |
1444 | ||
1445 | return 0; | |
1446 | ||
1447 | err2: | |
1448 | /* For safety always re-enable waking and disable gfx clock forcing */ | |
1449 | vlv_allow_gt_wake(dev_priv, true); | |
1450 | err1: | |
1451 | vlv_force_gfx_clock(dev_priv, false); | |
1452 | ||
1453 | return err; | |
1454 | } | |
1455 | ||
1456 | static int vlv_resume_prepare(struct drm_i915_private *dev_priv, | |
1457 | bool rpm_resume) | |
1458 | { | |
1459 | struct drm_device *dev = dev_priv->dev; | |
1460 | int err; | |
1461 | int ret; | |
1462 | ||
1463 | /* | |
1464 | * If any of the steps fail just try to continue, that's the best we | |
1465 | * can do at this point. Return the first error code (which will also | |
1466 | * leave RPM permanently disabled). | |
1467 | */ | |
1468 | ret = vlv_force_gfx_clock(dev_priv, true); | |
1469 | ||
1470 | if (!IS_CHERRYVIEW(dev_priv->dev)) | |
1471 | vlv_restore_gunit_s0ix_state(dev_priv); | |
1472 | ||
1473 | err = vlv_allow_gt_wake(dev_priv, true); | |
1474 | if (!ret) | |
1475 | ret = err; | |
1476 | ||
1477 | err = vlv_force_gfx_clock(dev_priv, false); | |
1478 | if (!ret) | |
1479 | ret = err; | |
1480 | ||
1481 | vlv_check_no_gt_access(dev_priv); | |
1482 | ||
1483 | if (rpm_resume) { | |
1484 | intel_init_clock_gating(dev); | |
1485 | i915_gem_restore_fences(dev); | |
1486 | } | |
1487 | ||
1488 | return ret; | |
1489 | } | |
1490 | ||
1491 | static int intel_runtime_suspend(struct device *device) | |
1492 | { | |
1493 | struct pci_dev *pdev = to_pci_dev(device); | |
1494 | struct drm_device *dev = pci_get_drvdata(pdev); | |
1495 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1496 | int ret; | |
1497 | ||
1498 | if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev)))) | |
1499 | return -ENODEV; | |
1500 | ||
1501 | if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) | |
1502 | return -ENODEV; | |
1503 | ||
1504 | DRM_DEBUG_KMS("Suspending device\n"); | |
1505 | ||
1506 | /* | |
1507 | * We could deadlock here in case another thread holding struct_mutex | |
1508 | * calls RPM suspend concurrently, since the RPM suspend will wait | |
1509 | * first for this RPM suspend to finish. In this case the concurrent | |
1510 | * RPM resume will be followed by its RPM suspend counterpart. Still | |
1511 | * for consistency return -EAGAIN, which will reschedule this suspend. | |
1512 | */ | |
1513 | if (!mutex_trylock(&dev->struct_mutex)) { | |
1514 | DRM_DEBUG_KMS("device lock contention, deffering suspend\n"); | |
1515 | /* | |
1516 | * Bump the expiration timestamp, otherwise the suspend won't | |
1517 | * be rescheduled. | |
1518 | */ | |
1519 | pm_runtime_mark_last_busy(device); | |
1520 | ||
1521 | return -EAGAIN; | |
1522 | } | |
1523 | /* | |
1524 | * We are safe here against re-faults, since the fault handler takes | |
1525 | * an RPM reference. | |
1526 | */ | |
1527 | i915_gem_release_all_mmaps(dev_priv); | |
1528 | mutex_unlock(&dev->struct_mutex); | |
1529 | ||
1530 | intel_guc_suspend(dev); | |
1531 | ||
1532 | intel_suspend_gt_powersave(dev); | |
1533 | intel_runtime_pm_disable_interrupts(dev_priv); | |
1534 | ||
1535 | ret = intel_suspend_complete(dev_priv); | |
1536 | if (ret) { | |
1537 | DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); | |
1538 | intel_runtime_pm_enable_interrupts(dev_priv); | |
1539 | ||
1540 | return ret; | |
1541 | } | |
1542 | ||
1543 | cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); | |
1544 | intel_uncore_forcewake_reset(dev, false); | |
1545 | dev_priv->pm.suspended = true; | |
1546 | ||
1547 | /* | |
1548 | * FIXME: We really should find a document that references the arguments | |
1549 | * used below! | |
1550 | */ | |
1551 | if (IS_BROADWELL(dev)) { | |
1552 | /* | |
1553 | * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop | |
1554 | * being detected, and the call we do at intel_runtime_resume() | |
1555 | * won't be able to restore them. Since PCI_D3hot matches the | |
1556 | * actual specification and appears to be working, use it. | |
1557 | */ | |
1558 | intel_opregion_notify_adapter(dev, PCI_D3hot); | |
1559 | } else { | |
1560 | /* | |
1561 | * Current versions of firmware that depend on this opregion | |
1562 | * notification have repurposed the D1 definition to mean | |
1563 | * "runtime suspended" instead of the D3 you would normally | |
1564 | * expect, so that it can be distinguished from notifications | |
1565 | * that might be sent via the suspend path. | |
1566 | */ | |
1567 | intel_opregion_notify_adapter(dev, PCI_D1); | |
1568 | } | |
1569 | ||
1570 | assert_forcewakes_inactive(dev_priv); | |
1571 | ||
1572 | DRM_DEBUG_KMS("Device suspended\n"); | |
1573 | return 0; | |
1574 | } | |
1575 | ||
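| /* | |
| * Runtime PM resume counterpart: notify the ACPI opregion that we are back | |
| * in D0, resume GuC, run the per-platform resume step, then reinitialize | |
| * swizzling, ring frequencies, interrupts, HPD (except on VLV/CHV, where | |
| * the display power well takes care of it) and GT powersave. | |
| */ | |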
1576 | static int intel_runtime_resume(struct device *device) | |
1577 | { | |
1578 | struct pci_dev *pdev = to_pci_dev(device); | |
1579 | struct drm_device *dev = pci_get_drvdata(pdev); | |
1580 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1581 | int ret = 0; | |
1582 | ||
1583 | if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) | |
1584 | return -ENODEV; | |
1585 | ||
1586 | DRM_DEBUG_KMS("Resuming device\n"); | |
1587 | ||
1588 | intel_opregion_notify_adapter(dev, PCI_D0); | |
1589 | dev_priv->pm.suspended = false; | |
1590 | ||
1591 | intel_guc_resume(dev); | |
1592 | ||
1593 | if (IS_GEN6(dev_priv)) | |
1594 | intel_init_pch_refclk(dev); | |
1595 | ||
1596 | if (IS_BROXTON(dev)) | |
1597 | ret = bxt_resume_prepare(dev_priv); | |
1598 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | |
1599 | hsw_disable_pc8(dev_priv); | |
1600 | else if (IS_VALLEYVIEW(dev_priv)) | |
1601 | ret = vlv_resume_prepare(dev_priv, true); | |
1602 | ||
1603 | /* | |
1604 | * No point in rolling things back in case of an error, as the best | |
1605 | * we can do is hope that things will still work (and disable RPM). | |
1606 | */ | |
1607 | i915_gem_init_swizzling(dev); | |
1608 | gen6_update_ring_freq(dev); | |
1609 | ||
1610 | intel_runtime_pm_enable_interrupts(dev_priv); | |
1611 | ||
1612 | /* | |
1613 | * On VLV/CHV, display interrupts are part of the display | |
1614 | * power well, so hpd is reinitialized from there. For | |
1615 | * everyone else, do it here. | |
1616 | */ | |
1617 | if (!IS_VALLEYVIEW(dev_priv)) | |
1618 | intel_hpd_init(dev_priv); | |
1619 | ||
1620 | intel_enable_gt_powersave(dev); | |
1621 | ||
1622 | if (ret) | |
1623 | DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); | |
1624 | else | |
1625 | DRM_DEBUG_KMS("Device resumed\n"); | |
1626 | ||
1627 | return ret; | |
1628 | } | |
1629 | ||
1630 | /* | |
1631 | * This function implements the functionality common to the runtime | |
1632 | * and system suspend sequences. | |
1633 | */ | |
1634 | static int intel_suspend_complete(struct drm_i915_private *dev_priv) | |
1635 | { | |
1636 | int ret; | |
1637 | ||
1638 | if (IS_BROXTON(dev_priv)) | |
1639 | ret = bxt_suspend_complete(dev_priv); | |
1640 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | |
1641 | ret = hsw_suspend_complete(dev_priv); | |
1642 | else if (IS_VALLEYVIEW(dev_priv)) | |
1643 | ret = vlv_suspend_complete(dev_priv); | |
1644 | else | |
1645 | ret = 0; | |
1646 | ||
1647 | return ret; | |
1648 | } | |
1649 | ||
1650 | static const struct dev_pm_ops i915_pm_ops = { | |
1651 | /* | |
1652 | * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, | |
1653 | * PMSG_RESUME] | |
1654 | */ | |
1655 | .suspend = i915_pm_suspend, | |
1656 | .suspend_late = i915_pm_suspend_late, | |
1657 | .resume_early = i915_pm_resume_early, | |
1658 | .resume = i915_pm_resume, | |
1659 | ||
1660 | /* | |
1661 | * S4 event handlers | |
1662 | * @freeze, @freeze_late : called (1) before creating the | |
1663 | * hibernation image [PMSG_FREEZE] and | |
1664 | * (2) after rebooting, before restoring | |
1665 | * the image [PMSG_QUIESCE] | |
1666 | * @thaw, @thaw_early : called (1) after creating the hibernation | |
1667 | * image, before writing it [PMSG_THAW] | |
1668 | * and (2) after failing to create or | |
1669 | * restore the image [PMSG_RECOVER] | |
1670 | * @poweroff, @poweroff_late: called after writing the hibernation | |
1671 | * image, before rebooting [PMSG_HIBERNATE] | |
1672 | * @restore, @restore_early : called after rebooting and restoring the | |
1673 | * hibernation image [PMSG_RESTORE] | |
1674 | */ | |
1675 | .freeze = i915_pm_suspend, | |
1676 | .freeze_late = i915_pm_suspend_late, | |
1677 | .thaw_early = i915_pm_resume_early, | |
1678 | .thaw = i915_pm_resume, | |
1679 | .poweroff = i915_pm_suspend, | |
1680 | .poweroff_late = i915_pm_poweroff_late, | |
1681 | .restore_early = i915_pm_resume_early, | |
1682 | .restore = i915_pm_resume, | |
1683 | ||
1684 | /* S0ix (via runtime suspend) event handlers */ | |
1685 | .runtime_suspend = intel_runtime_suspend, | |
1686 | .runtime_resume = intel_runtime_resume, | |
1687 | }; | |
1688 | ||
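| /* | |
| * vm_ops for GEM object mmaps: page faults are serviced by | |
| * i915_gem_fault(), open/close use the generic DRM GEM helpers. | |
| */ | |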
1689 | static const struct vm_operations_struct i915_gem_vm_ops = { | |
1690 | .fault = i915_gem_fault, | |
1691 | .open = drm_gem_vm_open, | |
1692 | .close = drm_gem_vm_close, | |
1693 | }; | |
1694 | ||
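| /* | |
| * File operations for the DRM device node: the standard DRM open/release, | |
| * ioctl, mmap, poll and read entry points, plus a 32-bit compat ioctl | |
| * handler when CONFIG_COMPAT is enabled. | |
| */ | |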
1695 | static const struct file_operations i915_driver_fops = { | |
1696 | .owner = THIS_MODULE, | |
1697 | .open = drm_open, | |
1698 | .release = drm_release, | |
1699 | .unlocked_ioctl = drm_ioctl, | |
1700 | .mmap = drm_gem_mmap, | |
1701 | .poll = drm_poll, | |
1702 | .read = drm_read, | |
1703 | #ifdef CONFIG_COMPAT | |
1704 | .compat_ioctl = i915_compat_ioctl, | |
1705 | #endif | |
1706 | .llseek = noop_llseek, | |
1707 | }; | |
1708 | ||
1709 | static struct drm_driver driver = { | |
1710 | /* Don't use MTRRs here; the Xserver or userspace app should | |
1711 | * deal with them for Intel hardware. | |
1712 | */ | |
1713 | .driver_features = | |
1714 | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME | | |
1715 | DRIVER_RENDER | DRIVER_MODESET, | |
1716 | .load = i915_driver_load, | |
1717 | .unload = i915_driver_unload, | |
1718 | .open = i915_driver_open, | |
1719 | .lastclose = i915_driver_lastclose, | |
1720 | .preclose = i915_driver_preclose, | |
1721 | .postclose = i915_driver_postclose, | |
1722 | .set_busid = drm_pci_set_busid, | |
1723 | ||
1724 | #if defined(CONFIG_DEBUG_FS) | |
1725 | .debugfs_init = i915_debugfs_init, | |
1726 | .debugfs_cleanup = i915_debugfs_cleanup, | |
1727 | #endif | |
1728 | .gem_free_object = i915_gem_free_object, | |
1729 | .gem_vm_ops = &i915_gem_vm_ops, | |
1730 | ||
1731 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | |
1732 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | |
1733 | .gem_prime_export = i915_gem_prime_export, | |
1734 | .gem_prime_import = i915_gem_prime_import, | |
1735 | ||
1736 | .dumb_create = i915_gem_dumb_create, | |
1737 | .dumb_map_offset = i915_gem_mmap_gtt, | |
1738 | .dumb_destroy = drm_gem_dumb_destroy, | |
1739 | .ioctls = i915_ioctls, | |
1740 | .fops = &i915_driver_fops, | |
1741 | .name = DRIVER_NAME, | |
1742 | .desc = DRIVER_DESC, | |
1743 | .date = DRIVER_DATE, | |
1744 | .major = DRIVER_MAJOR, | |
1745 | .minor = DRIVER_MINOR, | |
1746 | .patchlevel = DRIVER_PATCHLEVEL, | |
1747 | }; | |
1748 | ||
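| /* | |
| * PCI glue: binds the driver to the device IDs in pciidlist and routes | |
| * power management callbacks (system sleep and runtime PM) through | |
| * i915_pm_ops above. | |
| */ | |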
1749 | static struct pci_driver i915_pci_driver = { | |
1750 | .name = DRIVER_NAME, | |
1751 | .id_table = pciidlist, | |
1752 | .probe = i915_pci_probe, | |
1753 | .remove = i915_pci_remove, | |
1754 | .driver.pm = &i915_pm_ops, | |
1755 | }; | |
1756 | ||
1757 | static int __init i915_init(void) | |
1758 | { | |
1759 | driver.num_ioctls = i915_max_ioctl; | |
1760 | ||
1761 | /* | |
1762 | * Enable KMS by default, unless explicitly overridden by | |
1763 | * either the i915.modeset parameter or by the | |
1764 | * vga_text_mode_force boot option. | |
1765 | */ | |
1766 | ||
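| /* | |
| * For example, booting with i915.modeset=0 clears DRIVER_MODESET below and | |
| * the driver bails out without registering, while i915.modeset=1 keeps KMS | |
| * enabled even when vgacon_text_force() would otherwise disable it. | |
| */ | |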
1767 | if (i915.modeset == 0) | |
1768 | driver.driver_features &= ~DRIVER_MODESET; | |
1769 | ||
1770 | #ifdef CONFIG_VGA_CONSOLE | |
1771 | if (vgacon_text_force() && i915.modeset == -1) | |
1772 | driver.driver_features &= ~DRIVER_MODESET; | |
1773 | #endif | |
1774 | ||
1775 | if (!(driver.driver_features & DRIVER_MODESET)) { | |
1776 | /* Silently fail loading to not upset userspace. */ | |
1777 | DRM_DEBUG_DRIVER("KMS and UMS disabled.\n"); | |
1778 | return 0; | |
1779 | } | |
1780 | ||
1781 | if (i915.nuclear_pageflip) | |
1782 | driver.driver_features |= DRIVER_ATOMIC; | |
1783 | ||
1784 | return drm_pci_init(&driver, &i915_pci_driver); | |
1785 | } | |
1786 | ||
1787 | static void __exit i915_exit(void) | |
1788 | { | |
1789 | if (!(driver.driver_features & DRIVER_MODESET)) | |
1790 | return; /* Never loaded a driver. */ | |
1791 | ||
1792 | drm_pci_exit(&driver, &i915_pci_driver); | |
1793 | } | |
1794 | ||
1795 | module_init(i915_init); | |
1796 | module_exit(i915_exit); | |
1797 | ||
1798 | MODULE_AUTHOR("Tungsten Graphics, Inc."); | |
1799 | MODULE_AUTHOR("Intel Corporation"); | |
1800 | ||
1801 | MODULE_DESCRIPTION(DRIVER_DESC); | |
1802 | MODULE_LICENSE("GPL and additional rights"); |