git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git / drivers/gpu/drm/i915/gt/intel_gt_pm.c
drm/i915: Pass in intel_gt at some for_each_engine sites
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_params.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_wakeref.h"

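/* Broadcast a park/unpark event to listeners on the GT PM notifier chain. */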
static void pm_notify(struct intel_gt *gt, int state)
{
        blocking_notifier_call_chain(&gt->pm_notifications, state, gt->i915);
}

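/*
 * Called when the first wakeref is acquired: power up the GT, re-enable
 * powersaving and resume request and PMU tracking.
 */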
static int __gt_unpark(struct intel_wakeref *wf)
{
        struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
        struct drm_i915_private *i915 = gt->i915;

        GEM_TRACE("\n");

        i915_globals_unpark();

        /*
         * It seems that the DMC likes to transition between the DC states a lot
         * when there are no connected displays (no active power domains) during
         * command submission.
         *
         * This activity has negative impact on the performance of the chip with
         * huge latencies observed in the interrupt handler and elsewhere.
         *
         * Work around it by grabbing a GT IRQ power domain whilst there is any
         * GT activity, preventing any DC state transitions.
         */
        gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
        GEM_BUG_ON(!gt->awake);

        intel_enable_gt_powersave(i915);

        i915_update_gfx_val(i915);
        if (INTEL_GEN(i915) >= 6)
                gen6_rps_busy(i915);

        i915_pmu_gt_unparked(i915);

        intel_gt_queue_hangcheck(gt);
        intel_gt_unpark_requests(gt);

        pm_notify(gt, INTEL_GT_UNPARK);

        return 0;
}

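/*
 * Called when the last wakeref is released: quiesce request handling,
 * flush any residual interrupts and drop the display power reference
 * taken on unpark.
 */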
static int __gt_park(struct intel_wakeref *wf)
{
        struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
        intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
        struct drm_i915_private *i915 = gt->i915;

        GEM_TRACE("\n");

        pm_notify(gt, INTEL_GT_PARK);
        intel_gt_park_requests(gt);

        i915_pmu_gt_parked(i915);
        if (INTEL_GEN(i915) >= 6)
                gen6_rps_idle(i915);

        /* Everything switched off, flush any residual interrupt just in case */
        intel_synchronize_irq(i915);

        GEM_BUG_ON(!wakeref);
        intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);

        i915_globals_park();

        return 0;
}

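/* Allow the final put (park) to be deferred to a worker, see INTEL_WAKEREF_PUT_ASYNC. */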
static const struct intel_wakeref_ops wf_ops = {
        .get = __gt_unpark,
        .put = __gt_park,
        .flags = INTEL_WAKEREF_PUT_ASYNC,
};

void intel_gt_pm_init_early(struct intel_gt *gt)
{
        intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops);

        BLOCKING_INIT_NOTIFIER_HEAD(&gt->pm_notifications);
}

void intel_gt_pm_init(struct intel_gt *gt)
{
        /*
         * Enabling power-management should be "self-healing". If we cannot
         * enable a feature, simply leave it disabled with a notice to the
         * user.
         */
        intel_rc6_init(&gt->rc6);
}

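/* Reset all engines, unless a full GPU reset would also clobber the display. */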
static bool reset_engines(struct intel_gt *gt)
{
        if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
                return false;

        return __intel_gt_reset(gt, ALL_ENGINES) == 0;
}

/**
 * intel_gt_sanitize: called after the GPU has lost power
 * @gt: the i915 GT container
 * @force: ignore a failed reset and sanitize engine state anyway
 *
 * Anytime we reset the GPU, either with an explicit GPU reset or through a
 * PCI power cycle, the GPU loses state and we must reset our state tracking
 * to match. Note that calling intel_gt_sanitize() if the GPU has not
 * been reset results in much confusion!
 */
void intel_gt_sanitize(struct intel_gt *gt, bool force)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        GEM_TRACE("\n");

        intel_uc_sanitize(&gt->uc);

        for_each_engine(engine, gt, id)
                if (engine->reset.prepare)
                        engine->reset.prepare(engine);

        if (reset_engines(gt) || force) {
                for_each_engine(engine, gt, id)
                        __intel_engine_reset(engine, false);
        }

        for_each_engine(engine, gt, id)
                if (engine->reset.finish)
                        engine->reset.finish(engine);
}

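/* Skip the legacy powersave sanitize for mock (selftest-only) devices. */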
void intel_gt_pm_disable(struct intel_gt *gt)
{
        if (!is_mock_gt(gt))
                intel_sanitize_gt_powersave(gt->i915);
}

void intel_gt_pm_fini(struct intel_gt *gt)
{
        intel_rc6_fini(&gt->rc6);
}

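/* Restart the engines and scrub the pinned kernel contexts after the GT regains power. */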
int intel_gt_resume(struct intel_gt *gt)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = 0;

        /*
         * After resume, we may need to poke into the pinned kernel
         * contexts to paper over any damage caused by the sudden suspend.
         * Only the kernel contexts should remain pinned over suspend,
         * allowing us to fixup the user contexts on their first pin.
         */
        intel_gt_pm_get(gt);
        intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
        intel_rc6_sanitize(&gt->rc6);

        for_each_engine(engine, gt, id) {
                struct intel_context *ce;

                intel_engine_pm_get(engine);

                ce = engine->kernel_context;
                if (ce) {
                        GEM_BUG_ON(!intel_context_is_pinned(ce));
                        mutex_acquire(&ce->pin_mutex.dep_map, 0, 0, _THIS_IP_);
                        ce->ops->reset(ce);
                        mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
                }

                engine->serial++; /* kernel context lost */
                err = engine->resume(engine);

                intel_engine_pm_put(engine);
                if (err) {
                        dev_err(gt->i915->drm.dev,
                                "Failed to restart %s (%d)\n",
                                engine->name, err);
                        break;
                }
        }

        intel_rc6_enable(&gt->rc6);
        intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
        intel_gt_pm_put(gt);

        return err;
}

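/*
 * Wait for outstanding requests to retire; if they fail to complete in
 * time, wedge the GPU to cancel the remaining work so the GT can park.
 */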
static void wait_for_idle(struct intel_gt *gt)
{
        if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
                /*
                 * Forcibly cancel outstanding work and leave
                 * the gpu quiet.
                 */
                intel_gt_set_wedged(gt);
        }

        intel_gt_pm_wait_for_idle(gt);
}

void intel_gt_suspend(struct intel_gt *gt)
{
        intel_wakeref_t wakeref;

        /* We expect to be idle already; but also want to be independent */
        wait_for_idle(gt);

        with_intel_runtime_pm(gt->uncore->rpm, wakeref)
                intel_rc6_disable(&gt->rc6);
}

void intel_gt_runtime_suspend(struct intel_gt *gt)
{
        intel_uc_runtime_suspend(&gt->uc);
}

int intel_gt_runtime_resume(struct intel_gt *gt)
{
        intel_gt_init_swizzling(gt);

        return intel_uc_runtime_resume(&gt->uc);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_gt_pm.c"
#endif