]>
Commit | Line | Data |
---|---|---|
0bc12bcb RV |
1 | /* |
2 | * Copyright © 2014 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |
21 | * DEALINGS IN THE SOFTWARE. | |
22 | */ | |
23 | ||
24 | #include <drm/drmP.h> | |
25 | ||
26 | #include "intel_drv.h" | |
27 | #include "i915_drv.h" | |
28 | ||
29 | static bool is_edp_psr(struct intel_dp *intel_dp) | |
30 | { | |
31 | return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED; | |
32 | } | |
33 | ||
34 | bool intel_psr_is_enabled(struct drm_device *dev) | |
35 | { | |
36 | struct drm_i915_private *dev_priv = dev->dev_private; | |
37 | ||
38 | if (!HAS_PSR(dev)) | |
39 | return false; | |
40 | ||
41 | return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; | |
42 | } | |
43 | ||
/*
 * intel_psr_write_vsc - program the VSC SDP into the video DIP registers.
 * @intel_dp: eDP port the VSC packet is for
 * @vsc_psr: fully assembled VSC packet to write
 *
 * Disables the VSC DIP, copies the packet into the DIP data buffer
 * (zero-padding the remainder), then re-enables the VSC DIP. The
 * disable/write/enable ordering is required by the hardware spec.
 */
static void intel_psr_write_vsc(struct intel_dp *intel_dp,
				struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;	/* packet viewed as dwords */
	unsigned int i;

	/* As per BSPec (Pipe Video Data Island Packet), we need to disable
	   the video DIP being updated before program video DIP data buffer
	   registers for DIP being updated. */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);		/* flush the disable before data writes */

	/* Copy the packet one dword at a time; zero-fill past its end. */
	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
		if (i < sizeof(struct edp_vsc_psr))
			I915_WRITE(data_reg + i, *data++);
		else
			I915_WRITE(data_reg + i, 0);
	}

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);		/* make sure the enable lands */
}
72 | ||
73 | static void intel_psr_setup_vsc(struct intel_dp *intel_dp) | |
74 | { | |
75 | struct edp_vsc_psr psr_vsc; | |
76 | ||
77 | /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ | |
78 | memset(&psr_vsc, 0, sizeof(psr_vsc)); | |
79 | psr_vsc.sdp_header.HB0 = 0; | |
80 | psr_vsc.sdp_header.HB1 = 0x7; | |
81 | psr_vsc.sdp_header.HB2 = 0x2; | |
82 | psr_vsc.sdp_header.HB3 = 0x8; | |
83 | intel_psr_write_vsc(intel_dp, &psr_vsc); | |
84 | } | |
85 | ||
/*
 * intel_psr_enable_sink - enable PSR on the sink (panel) side.
 * @intel_dp: eDP port to enable PSR on
 *
 * Writes DP_PSR_EN_CFG over the AUX channel to turn PSR on in the sink,
 * then pre-programs the source's PSR AUX registers with the message the
 * hardware will send on its own when exiting PSR (a native AUX write of
 * DP_SET_POWER = D0).
 */
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t aux_clock_divider;
	int precharge = 0x3;
	bool only_standby = false;
	/* Hand-packed native AUX write: command nibble, 20-bit address
	 * (DP_SET_POWER), length-1, then the single data byte. */
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	int i;

	/* The PSR AUX data registers can hold at most 20 message bytes. */
	BUILD_BUG_ON(sizeof(aux_msg) > 20);

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* NOTE(review): on BDW, ports other than DDI A apparently can only
	 * use the link-standby flavour of PSR — confirm against bspec. */
	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
		only_standby = true;

	/* Enable PSR in sink; request main-link-active only when the sink
	 * needs link training on exit and standby isn't forced. */
	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
	else
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);

	/* Setup AUX registers: pack the message four bytes per register. */
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	I915_WRITE(EDP_PSR_AUX_CTL(dev),
		   DP_AUX_CH_CTL_TIME_OUT_400us |
		   (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
129 | ||
/*
 * intel_psr_enable_source - enable PSR on the source (display engine) side.
 * @intel_dp: eDP port PSR is being enabled for
 *
 * Programs EDP_PSR_CTL with idle-frame count, max sleep time, link
 * disposition and timing bits, and sets EDP_PSR_ENABLE.
 */
static void intel_psr_enable_source(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t max_sleep_time = 0x1f;	/* max field value — units per bspec, TODO confirm */
	uint32_t idle_frames = 1;	/* idle frames before hw enters PSR */
	uint32_t val = 0x0;
	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
	bool only_standby = false;

	/* NOTE(review): mirrors the BDW non-DDI-A restriction applied in
	 * intel_psr_enable_sink() — keep the two in sync. */
	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
		only_standby = true;

	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
		/* Sink can exit without training (or standby is forced):
		 * keep the link in standby and use the fast-exit timings. */
		val |= EDP_PSR_LINK_STANDBY;
		val |= EDP_PSR_TP2_TP3_TIME_0us;
		val |= EDP_PSR_TP1_TIME_0us;
		val |= EDP_PSR_SKIP_AUX_EXIT;
		val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
	} else
		val |= EDP_PSR_LINK_DISABLE;

	I915_WRITE(EDP_PSR_CTL(dev), val |
		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
		   EDP_PSR_ENABLE);
}
159 | ||
160 | static bool intel_psr_match_conditions(struct intel_dp *intel_dp) | |
161 | { | |
162 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | |
163 | struct drm_device *dev = dig_port->base.base.dev; | |
164 | struct drm_i915_private *dev_priv = dev->dev_private; | |
165 | struct drm_crtc *crtc = dig_port->base.base.crtc; | |
166 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
167 | ||
168 | lockdep_assert_held(&dev_priv->psr.lock); | |
169 | WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); | |
170 | WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); | |
171 | ||
172 | dev_priv->psr.source_ok = false; | |
173 | ||
174 | if (IS_HASWELL(dev) && dig_port->port != PORT_A) { | |
175 | DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); | |
176 | return false; | |
177 | } | |
178 | ||
179 | if (!i915.enable_psr) { | |
180 | DRM_DEBUG_KMS("PSR disable by flag\n"); | |
181 | return false; | |
182 | } | |
183 | ||
184 | /* Below limitations aren't valid for Broadwell */ | |
185 | if (IS_BROADWELL(dev)) | |
186 | goto out; | |
187 | ||
188 | if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) & | |
189 | S3D_ENABLE) { | |
190 | DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); | |
191 | return false; | |
192 | } | |
193 | ||
194 | if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { | |
195 | DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); | |
196 | return false; | |
197 | } | |
198 | ||
199 | out: | |
200 | dev_priv->psr.source_ok = true; | |
201 | return true; | |
202 | } | |
203 | ||
204 | static void intel_psr_do_enable(struct intel_dp *intel_dp) | |
205 | { | |
206 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | |
207 | struct drm_device *dev = intel_dig_port->base.base.dev; | |
208 | struct drm_i915_private *dev_priv = dev->dev_private; | |
209 | ||
210 | WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE); | |
211 | WARN_ON(dev_priv->psr.active); | |
212 | lockdep_assert_held(&dev_priv->psr.lock); | |
213 | ||
214 | /* Enable/Re-enable PSR on the host */ | |
215 | intel_psr_enable_source(intel_dp); | |
216 | ||
217 | dev_priv->psr.active = true; | |
218 | } | |
219 | ||
/*
 * intel_psr_enable - enable PSR on the given eDP port.
 * @intel_dp: eDP port to enable PSR on
 *
 * Checks platform and sink support, sets up the VSC packet and PSR debug
 * mask, and enables PSR in the sink. The source side is enabled later by
 * the delayed work (intel_psr_work) once the frontbuffer goes idle.
 */
void intel_psr_enable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_PSR(dev)) {
		DRM_DEBUG_KMS("PSR not supported on this platform\n");
		return;
	}

	if (!is_edp_psr(intel_dp)) {
		DRM_DEBUG_KMS("PSR not supported by this panel\n");
		return;
	}

	mutex_lock(&dev_priv->psr.lock);
	/* Only one pipe/port may own PSR at a time. */
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

	if (!intel_psr_match_conditions(intel_dp))
		goto unlock;

	/* Start with a clean frontbuffer-busy slate. */
	dev_priv->psr.busy_frontbuffer_bits = 0;

	intel_psr_setup_vsc(intel_dp);

	/* Avoid continuous PSR exit by masking memup and hpd */
	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);

	/* Enable PSR on the panel */
	intel_psr_enable_sink(intel_dp);

	dev_priv->psr.enabled = intel_dp;
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
260 | ||
/*
 * intel_psr_disable - disable PSR on the given eDP port.
 * @intel_dp: eDP port to disable PSR on
 *
 * Clears the source-side enable bit, waits for the hardware to report
 * idle, and cancels any pending re-enable work.
 */
void intel_psr_disable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	if (dev_priv->psr.active) {
		I915_WRITE(EDP_PSR_CTL(dev),
			   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);

		/* Wait till PSR is idle */
		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
			       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

		dev_priv->psr.active = false;
	} else {
		/* Not marked active, so hardware should not have it enabled. */
		WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
	}

	dev_priv->psr.enabled = NULL;
	mutex_unlock(&dev_priv->psr.lock);

	/* Cancel work after dropping the lock: intel_psr_work() takes
	 * psr.lock itself, so a _sync cancel under the lock could deadlock. */
	cancel_delayed_work_sync(&dev_priv->psr.work);
}
292 | ||
293 | static void intel_psr_work(struct work_struct *work) | |
294 | { | |
295 | struct drm_i915_private *dev_priv = | |
296 | container_of(work, typeof(*dev_priv), psr.work.work); | |
297 | struct intel_dp *intel_dp = dev_priv->psr.enabled; | |
298 | ||
299 | /* We have to make sure PSR is ready for re-enable | |
300 | * otherwise it keeps disabled until next full enable/disable cycle. | |
301 | * PSR might take some time to get fully disabled | |
302 | * and be ready for re-enable. | |
303 | */ | |
304 | if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) & | |
305 | EDP_PSR_STATUS_STATE_MASK) == 0, 50)) { | |
306 | DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); | |
307 | return; | |
308 | } | |
309 | ||
310 | mutex_lock(&dev_priv->psr.lock); | |
311 | intel_dp = dev_priv->psr.enabled; | |
312 | ||
313 | if (!intel_dp) | |
314 | goto unlock; | |
315 | ||
316 | /* | |
317 | * The delayed work can race with an invalidate hence we need to | |
318 | * recheck. Since psr_flush first clears this and then reschedules we | |
319 | * won't ever miss a flush when bailing out here. | |
320 | */ | |
321 | if (dev_priv->psr.busy_frontbuffer_bits) | |
322 | goto unlock; | |
323 | ||
324 | intel_psr_do_enable(intel_dp); | |
325 | unlock: | |
326 | mutex_unlock(&dev_priv->psr.lock); | |
327 | } | |
328 | ||
329 | static void intel_psr_exit(struct drm_device *dev) | |
330 | { | |
331 | struct drm_i915_private *dev_priv = dev->dev_private; | |
332 | ||
333 | if (dev_priv->psr.active) { | |
334 | u32 val = I915_READ(EDP_PSR_CTL(dev)); | |
335 | ||
336 | WARN_ON(!(val & EDP_PSR_ENABLE)); | |
337 | ||
338 | I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE); | |
339 | ||
340 | dev_priv->psr.active = false; | |
341 | } | |
342 | ||
343 | } | |
344 | ||
/*
 * intel_psr_invalidate - react to frontbuffer rendering starting.
 * @dev: drm device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Exits PSR and records which frontbuffer bits on the PSR pipe are now
 * busy; intel_psr_flush() clears them again and schedules the re-enable.
 */
void intel_psr_invalidate(struct drm_device *dev,
			  unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	/* Only the pipe driving the PSR-enabled port matters. */
	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	intel_psr_exit(dev);

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
	mutex_unlock(&dev_priv->psr.lock);
}
368 | ||
/*
 * intel_psr_flush - react to frontbuffer rendering finishing.
 * @dev: drm device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Clears the busy bits set by intel_psr_invalidate() and, once nothing is
 * busy and PSR is inactive, schedules the delayed work to re-enable it.
 */
void intel_psr_flush(struct drm_device *dev,
		     unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/*
	 * On Haswell sprite plane updates don't result in a psr invalidating
	 * signal in the hardware. Which means we need to manually fake this in
	 * software for all flushes, not just when we've seen a preceding
	 * invalidation through frontbuffer rendering.
	 */
	if (IS_HASWELL(dev) &&
	    (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
		intel_psr_exit(dev);

	/* Delay the re-enable so intel_psr_work() can wait for hw idle. */
	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->psr.work,
				      msecs_to_jiffies(100));
	mutex_unlock(&dev_priv->psr.lock);
}
401 | ||
402 | void intel_psr_init(struct drm_device *dev) | |
403 | { | |
404 | struct drm_i915_private *dev_priv = dev->dev_private; | |
405 | ||
406 | INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work); | |
407 | mutex_init(&dev_priv->psr.lock); | |
408 | } |