]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - drivers/gpu/drm/drm_atomic_helper.c
Merge tag 'dmaengine-4.20-rc1' of git://git.infradead.org/users/vkoul/slave-dma
[mirror_ubuntu-jammy-kernel.git] / drivers / gpu / drm / drm_atomic_helper.c
1 /*
2 * Copyright (C) 2014 Red Hat
3 * Copyright (C) 2014 Intel Corp.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robdclark@gmail.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_atomic.h>
30 #include <drm/drm_plane_helper.h>
31 #include <drm/drm_crtc_helper.h>
32 #include <drm/drm_atomic_helper.h>
33 #include <drm/drm_writeback.h>
34 #include <linux/dma-fence.h>
35
36 #include "drm_crtc_helper_internal.h"
37 #include "drm_crtc_internal.h"
38
39 /**
40 * DOC: overview
41 *
42 * This helper library provides implementations of check and commit functions on
43 * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
44 * also provides convenience implementations for the atomic state handling
45 * callbacks for drivers which don't need to subclass the drm core structures to
46 * add their own additional internal state.
47 *
48 * This library also provides default implementations for the check callback in
49 * drm_atomic_helper_check() and for the commit callback with
50 * drm_atomic_helper_commit(). But the individual stages and callbacks are
51 * exposed to allow drivers to mix and match and e.g. use the plane helpers only
52 * together with a driver private modeset implementation.
53 *
54 * This library also provides implementations for all the legacy driver
55 * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
56 * drm_atomic_helper_disable_plane(), drm_atomic_helper_disable_plane() and the
57 * various functions to implement set_property callbacks. New drivers must not
58 * implement these functions themselves but must use the provided helpers.
59 *
60 * The atomic helper uses the same function table structures as all other
61 * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
62 * struct &drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
63 * also shares the &struct drm_plane_helper_funcs function table with the plane
64 * helpers.
65 */
66 static void
67 drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
68 struct drm_plane_state *old_plane_state,
69 struct drm_plane_state *plane_state,
70 struct drm_plane *plane)
71 {
72 struct drm_crtc_state *crtc_state;
73
74 if (old_plane_state->crtc) {
75 crtc_state = drm_atomic_get_new_crtc_state(state,
76 old_plane_state->crtc);
77
78 if (WARN_ON(!crtc_state))
79 return;
80
81 crtc_state->planes_changed = true;
82 }
83
84 if (plane_state->crtc) {
85 crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
86
87 if (WARN_ON(!crtc_state))
88 return;
89
90 crtc_state->planes_changed = true;
91 }
92 }
93
94 static int handle_conflicting_encoders(struct drm_atomic_state *state,
95 bool disable_conflicting_encoders)
96 {
97 struct drm_connector_state *new_conn_state;
98 struct drm_connector *connector;
99 struct drm_connector_list_iter conn_iter;
100 struct drm_encoder *encoder;
101 unsigned encoder_mask = 0;
102 int i, ret = 0;
103
104 /*
105 * First loop, find all newly assigned encoders from the connectors
106 * part of the state. If the same encoder is assigned to multiple
107 * connectors bail out.
108 */
109 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
110 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
111 struct drm_encoder *new_encoder;
112
113 if (!new_conn_state->crtc)
114 continue;
115
116 if (funcs->atomic_best_encoder)
117 new_encoder = funcs->atomic_best_encoder(connector, new_conn_state);
118 else if (funcs->best_encoder)
119 new_encoder = funcs->best_encoder(connector);
120 else
121 new_encoder = drm_atomic_helper_best_encoder(connector);
122
123 if (new_encoder) {
124 if (encoder_mask & drm_encoder_mask(new_encoder)) {
125 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
126 new_encoder->base.id, new_encoder->name,
127 connector->base.id, connector->name);
128
129 return -EINVAL;
130 }
131
132 encoder_mask |= drm_encoder_mask(new_encoder);
133 }
134 }
135
136 if (!encoder_mask)
137 return 0;
138
139 /*
140 * Second loop, iterate over all connectors not part of the state.
141 *
142 * If a conflicting encoder is found and disable_conflicting_encoders
143 * is not set, an error is returned. Userspace can provide a solution
144 * through the atomic ioctl.
145 *
146 * If the flag is set conflicting connectors are removed from the crtc
147 * and the crtc is disabled if no encoder is left. This preserves
148 * compatibility with the legacy set_config behavior.
149 */
150 drm_connector_list_iter_begin(state->dev, &conn_iter);
151 drm_for_each_connector_iter(connector, &conn_iter) {
152 struct drm_crtc_state *crtc_state;
153
154 if (drm_atomic_get_new_connector_state(state, connector))
155 continue;
156
157 encoder = connector->state->best_encoder;
158 if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
159 continue;
160
161 if (!disable_conflicting_encoders) {
162 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
163 encoder->base.id, encoder->name,
164 connector->state->crtc->base.id,
165 connector->state->crtc->name,
166 connector->base.id, connector->name);
167 ret = -EINVAL;
168 goto out;
169 }
170
171 new_conn_state = drm_atomic_get_connector_state(state, connector);
172 if (IS_ERR(new_conn_state)) {
173 ret = PTR_ERR(new_conn_state);
174 goto out;
175 }
176
177 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
178 encoder->base.id, encoder->name,
179 new_conn_state->crtc->base.id, new_conn_state->crtc->name,
180 connector->base.id, connector->name);
181
182 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
183
184 ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
185 if (ret)
186 goto out;
187
188 if (!crtc_state->connector_mask) {
189 ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
190 NULL);
191 if (ret < 0)
192 goto out;
193
194 crtc_state->active = false;
195 }
196 }
197 out:
198 drm_connector_list_iter_end(&conn_iter);
199
200 return ret;
201 }
202
203 static void
204 set_best_encoder(struct drm_atomic_state *state,
205 struct drm_connector_state *conn_state,
206 struct drm_encoder *encoder)
207 {
208 struct drm_crtc_state *crtc_state;
209 struct drm_crtc *crtc;
210
211 if (conn_state->best_encoder) {
212 /* Unset the encoder_mask in the old crtc state. */
213 crtc = conn_state->connector->state->crtc;
214
215 /* A NULL crtc is an error here because we should have
216 * duplicated a NULL best_encoder when crtc was NULL.
217 * As an exception restoring duplicated atomic state
218 * during resume is allowed, so don't warn when
219 * best_encoder is equal to encoder we intend to set.
220 */
221 WARN_ON(!crtc && encoder != conn_state->best_encoder);
222 if (crtc) {
223 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
224
225 crtc_state->encoder_mask &=
226 ~drm_encoder_mask(conn_state->best_encoder);
227 }
228 }
229
230 if (encoder) {
231 crtc = conn_state->crtc;
232 WARN_ON(!crtc);
233 if (crtc) {
234 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
235
236 crtc_state->encoder_mask |=
237 drm_encoder_mask(encoder);
238 }
239 }
240
241 conn_state->best_encoder = encoder;
242 }
243
244 static void
245 steal_encoder(struct drm_atomic_state *state,
246 struct drm_encoder *encoder)
247 {
248 struct drm_crtc_state *crtc_state;
249 struct drm_connector *connector;
250 struct drm_connector_state *old_connector_state, *new_connector_state;
251 int i;
252
253 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
254 struct drm_crtc *encoder_crtc;
255
256 if (new_connector_state->best_encoder != encoder)
257 continue;
258
259 encoder_crtc = old_connector_state->crtc;
260
261 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
262 encoder->base.id, encoder->name,
263 encoder_crtc->base.id, encoder_crtc->name);
264
265 set_best_encoder(state, new_connector_state, NULL);
266
267 crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
268 crtc_state->connectors_changed = true;
269
270 return;
271 }
272 }
273
274 static int
275 update_connector_routing(struct drm_atomic_state *state,
276 struct drm_connector *connector,
277 struct drm_connector_state *old_connector_state,
278 struct drm_connector_state *new_connector_state)
279 {
280 const struct drm_connector_helper_funcs *funcs;
281 struct drm_encoder *new_encoder;
282 struct drm_crtc_state *crtc_state;
283
284 DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
285 connector->base.id,
286 connector->name);
287
288 if (old_connector_state->crtc != new_connector_state->crtc) {
289 if (old_connector_state->crtc) {
290 crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
291 crtc_state->connectors_changed = true;
292 }
293
294 if (new_connector_state->crtc) {
295 crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
296 crtc_state->connectors_changed = true;
297 }
298 }
299
300 if (!new_connector_state->crtc) {
301 DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n",
302 connector->base.id,
303 connector->name);
304
305 set_best_encoder(state, new_connector_state, NULL);
306
307 return 0;
308 }
309
310 funcs = connector->helper_private;
311
312 if (funcs->atomic_best_encoder)
313 new_encoder = funcs->atomic_best_encoder(connector,
314 new_connector_state);
315 else if (funcs->best_encoder)
316 new_encoder = funcs->best_encoder(connector);
317 else
318 new_encoder = drm_atomic_helper_best_encoder(connector);
319
320 if (!new_encoder) {
321 DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
322 connector->base.id,
323 connector->name);
324 return -EINVAL;
325 }
326
327 if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
328 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
329 new_encoder->base.id,
330 new_encoder->name,
331 new_connector_state->crtc->base.id,
332 new_connector_state->crtc->name);
333 return -EINVAL;
334 }
335
336 if (new_encoder == new_connector_state->best_encoder) {
337 set_best_encoder(state, new_connector_state, new_encoder);
338
339 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
340 connector->base.id,
341 connector->name,
342 new_encoder->base.id,
343 new_encoder->name,
344 new_connector_state->crtc->base.id,
345 new_connector_state->crtc->name);
346
347 return 0;
348 }
349
350 steal_encoder(state, new_encoder);
351
352 set_best_encoder(state, new_connector_state, new_encoder);
353
354 crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
355 crtc_state->connectors_changed = true;
356
357 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
358 connector->base.id,
359 connector->name,
360 new_encoder->base.id,
361 new_encoder->name,
362 new_connector_state->crtc->base.id,
363 new_connector_state->crtc->name);
364
365 return 0;
366 }
367
368 static int
369 mode_fixup(struct drm_atomic_state *state)
370 {
371 struct drm_crtc *crtc;
372 struct drm_crtc_state *new_crtc_state;
373 struct drm_connector *connector;
374 struct drm_connector_state *new_conn_state;
375 int i;
376 int ret;
377
378 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
379 if (!new_crtc_state->mode_changed &&
380 !new_crtc_state->connectors_changed)
381 continue;
382
383 drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
384 }
385
386 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
387 const struct drm_encoder_helper_funcs *funcs;
388 struct drm_encoder *encoder;
389
390 WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);
391
392 if (!new_conn_state->crtc || !new_conn_state->best_encoder)
393 continue;
394
395 new_crtc_state =
396 drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
397
398 /*
399 * Each encoder has at most one connector (since we always steal
400 * it away), so we won't call ->mode_fixup twice.
401 */
402 encoder = new_conn_state->best_encoder;
403 funcs = encoder->helper_private;
404
405 ret = drm_bridge_mode_fixup(encoder->bridge, &new_crtc_state->mode,
406 &new_crtc_state->adjusted_mode);
407 if (!ret) {
408 DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
409 return -EINVAL;
410 }
411
412 if (funcs && funcs->atomic_check) {
413 ret = funcs->atomic_check(encoder, new_crtc_state,
414 new_conn_state);
415 if (ret) {
416 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n",
417 encoder->base.id, encoder->name);
418 return ret;
419 }
420 } else if (funcs && funcs->mode_fixup) {
421 ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
422 &new_crtc_state->adjusted_mode);
423 if (!ret) {
424 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n",
425 encoder->base.id, encoder->name);
426 return -EINVAL;
427 }
428 }
429 }
430
431 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
432 const struct drm_crtc_helper_funcs *funcs;
433
434 if (!new_crtc_state->enable)
435 continue;
436
437 if (!new_crtc_state->mode_changed &&
438 !new_crtc_state->connectors_changed)
439 continue;
440
441 funcs = crtc->helper_private;
442 if (!funcs->mode_fixup)
443 continue;
444
445 ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
446 &new_crtc_state->adjusted_mode);
447 if (!ret) {
448 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n",
449 crtc->base.id, crtc->name);
450 return -EINVAL;
451 }
452 }
453
454 return 0;
455 }
456
457 static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
458 struct drm_encoder *encoder,
459 struct drm_crtc *crtc,
460 struct drm_display_mode *mode)
461 {
462 enum drm_mode_status ret;
463
464 ret = drm_encoder_mode_valid(encoder, mode);
465 if (ret != MODE_OK) {
466 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] mode_valid() failed\n",
467 encoder->base.id, encoder->name);
468 return ret;
469 }
470
471 ret = drm_bridge_mode_valid(encoder->bridge, mode);
472 if (ret != MODE_OK) {
473 DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n");
474 return ret;
475 }
476
477 ret = drm_crtc_mode_valid(crtc, mode);
478 if (ret != MODE_OK) {
479 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode_valid() failed\n",
480 crtc->base.id, crtc->name);
481 return ret;
482 }
483
484 return ret;
485 }
486
487 static int
488 mode_valid(struct drm_atomic_state *state)
489 {
490 struct drm_connector_state *conn_state;
491 struct drm_connector *connector;
492 int i;
493
494 for_each_new_connector_in_state(state, connector, conn_state, i) {
495 struct drm_encoder *encoder = conn_state->best_encoder;
496 struct drm_crtc *crtc = conn_state->crtc;
497 struct drm_crtc_state *crtc_state;
498 enum drm_mode_status mode_status;
499 struct drm_display_mode *mode;
500
501 if (!crtc || !encoder)
502 continue;
503
504 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
505 if (!crtc_state)
506 continue;
507 if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
508 continue;
509
510 mode = &crtc_state->mode;
511
512 mode_status = mode_valid_path(connector, encoder, crtc, mode);
513 if (mode_status != MODE_OK)
514 return -EINVAL;
515 }
516
517 return 0;
518 }
519
520 /**
521 * drm_atomic_helper_check_modeset - validate state object for modeset changes
522 * @dev: DRM device
523 * @state: the driver state object
524 *
525 * Check the state object to see if the requested state is physically possible.
526 * This does all the crtc and connector related computations for an atomic
527 * update and adds any additional connectors needed for full modesets. It calls
528 * the various per-object callbacks in the follow order:
529 *
530 * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
531 * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
532 * 3. If it's determined a modeset is needed then all connectors on the affected crtc
533 * crtc are added and &drm_connector_helper_funcs.atomic_check is run on them.
534 * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
535 * &drm_crtc_helper_funcs.mode_valid are called on the affected components.
536 * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
537 * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
538 * This function is only called when the encoder will be part of a configured crtc,
539 * it must not be used for implementing connector property validation.
540 * If this function is NULL, &drm_atomic_encoder_helper_funcs.mode_fixup is called
541 * instead.
542 * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with crtc constraints.
543 *
544 * &drm_crtc_state.mode_changed is set when the input mode is changed.
545 * &drm_crtc_state.connectors_changed is set when a connector is added or
546 * removed from the crtc. &drm_crtc_state.active_changed is set when
547 * &drm_crtc_state.active changes, which is used for DPMS.
548 * See also: drm_atomic_crtc_needs_modeset()
549 *
550 * IMPORTANT:
551 *
552 * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
553 * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
554 * without a full modeset) _must_ call this function afterwards after that
555 * change. It is permitted to call this function multiple times for the same
556 * update, e.g. when the &drm_crtc_helper_funcs.atomic_check functions depend
557 * upon the adjusted dotclock for fifo space allocation and watermark
558 * computation.
559 *
560 * RETURNS:
561 * Zero for success or -errno
562 */
563 int
564 drm_atomic_helper_check_modeset(struct drm_device *dev,
565 struct drm_atomic_state *state)
566 {
567 struct drm_crtc *crtc;
568 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
569 struct drm_connector *connector;
570 struct drm_connector_state *old_connector_state, *new_connector_state;
571 int i, ret;
572 unsigned connectors_mask = 0;
573
574 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
575 bool has_connectors =
576 !!new_crtc_state->connector_mask;
577
578 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
579
580 if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
581 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
582 crtc->base.id, crtc->name);
583 new_crtc_state->mode_changed = true;
584 }
585
586 if (old_crtc_state->enable != new_crtc_state->enable) {
587 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n",
588 crtc->base.id, crtc->name);
589
590 /*
591 * For clarity this assignment is done here, but
592 * enable == 0 is only true when there are no
593 * connectors and a NULL mode.
594 *
595 * The other way around is true as well. enable != 0
596 * iff connectors are attached and a mode is set.
597 */
598 new_crtc_state->mode_changed = true;
599 new_crtc_state->connectors_changed = true;
600 }
601
602 if (old_crtc_state->active != new_crtc_state->active) {
603 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
604 crtc->base.id, crtc->name);
605 new_crtc_state->active_changed = true;
606 }
607
608 if (new_crtc_state->enable != has_connectors) {
609 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
610 crtc->base.id, crtc->name);
611
612 return -EINVAL;
613 }
614 }
615
616 ret = handle_conflicting_encoders(state, false);
617 if (ret)
618 return ret;
619
620 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
621 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
622
623 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
624
625 /*
626 * This only sets crtc->connectors_changed for routing changes,
627 * drivers must set crtc->connectors_changed themselves when
628 * connector properties need to be updated.
629 */
630 ret = update_connector_routing(state, connector,
631 old_connector_state,
632 new_connector_state);
633 if (ret)
634 return ret;
635 if (old_connector_state->crtc) {
636 new_crtc_state = drm_atomic_get_new_crtc_state(state,
637 old_connector_state->crtc);
638 if (old_connector_state->link_status !=
639 new_connector_state->link_status)
640 new_crtc_state->connectors_changed = true;
641 }
642
643 if (funcs->atomic_check)
644 ret = funcs->atomic_check(connector, new_connector_state);
645 if (ret)
646 return ret;
647
648 connectors_mask |= BIT(i);
649 }
650
651 /*
652 * After all the routing has been prepared we need to add in any
653 * connector which is itself unchanged, but who's crtc changes it's
654 * configuration. This must be done before calling mode_fixup in case a
655 * crtc only changed its mode but has the same set of connectors.
656 */
657 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
658 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
659 continue;
660
661 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
662 crtc->base.id, crtc->name,
663 new_crtc_state->enable ? 'y' : 'n',
664 new_crtc_state->active ? 'y' : 'n');
665
666 ret = drm_atomic_add_affected_connectors(state, crtc);
667 if (ret != 0)
668 return ret;
669
670 ret = drm_atomic_add_affected_planes(state, crtc);
671 if (ret != 0)
672 return ret;
673 }
674
675 /*
676 * Iterate over all connectors again, to make sure atomic_check()
677 * has been called on them when a modeset is forced.
678 */
679 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
680 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
681
682 if (connectors_mask & BIT(i))
683 continue;
684
685 if (funcs->atomic_check)
686 ret = funcs->atomic_check(connector, new_connector_state);
687 if (ret)
688 return ret;
689 }
690
691 ret = mode_valid(state);
692 if (ret)
693 return ret;
694
695 return mode_fixup(state);
696 }
697 EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
698
699 /**
700 * drm_atomic_helper_check_plane_state() - Check plane state for validity
701 * @plane_state: plane state to check
702 * @crtc_state: crtc state to check
703 * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
704 * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
705 * @can_position: is it legal to position the plane such that it
706 * doesn't cover the entire crtc? This will generally
707 * only be false for primary planes.
708 * @can_update_disabled: can the plane be updated while the crtc
709 * is disabled?
710 *
711 * Checks that a desired plane update is valid, and updates various
712 * bits of derived state (clipped coordinates etc.). Drivers that provide
713 * their own plane handling rather than helper-provided implementations may
714 * still wish to call this function to avoid duplication of error checking
715 * code.
716 *
717 * RETURNS:
718 * Zero if update appears valid, error code on failure
719 */
720 int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
721 const struct drm_crtc_state *crtc_state,
722 int min_scale,
723 int max_scale,
724 bool can_position,
725 bool can_update_disabled)
726 {
727 struct drm_framebuffer *fb = plane_state->fb;
728 struct drm_rect *src = &plane_state->src;
729 struct drm_rect *dst = &plane_state->dst;
730 unsigned int rotation = plane_state->rotation;
731 struct drm_rect clip = {};
732 int hscale, vscale;
733
734 WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);
735
736 *src = drm_plane_state_src(plane_state);
737 *dst = drm_plane_state_dest(plane_state);
738
739 if (!fb) {
740 plane_state->visible = false;
741 return 0;
742 }
743
744 /* crtc should only be NULL when disabling (i.e., !fb) */
745 if (WARN_ON(!plane_state->crtc)) {
746 plane_state->visible = false;
747 return 0;
748 }
749
750 if (!crtc_state->enable && !can_update_disabled) {
751 DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
752 return -EINVAL;
753 }
754
755 drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
756
757 /* Check scaling */
758 hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
759 vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
760 if (hscale < 0 || vscale < 0) {
761 DRM_DEBUG_KMS("Invalid scaling of plane\n");
762 drm_rect_debug_print("src: ", &plane_state->src, true);
763 drm_rect_debug_print("dst: ", &plane_state->dst, false);
764 return -ERANGE;
765 }
766
767 if (crtc_state->enable)
768 drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);
769
770 plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);
771
772 drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
773
774 if (!plane_state->visible)
775 /*
776 * Plane isn't visible; some drivers can handle this
777 * so we just return success here. Drivers that can't
778 * (including those that use the primary plane helper's
779 * update function) will return an error from their
780 * update_plane handler.
781 */
782 return 0;
783
784 if (!can_position && !drm_rect_equals(dst, &clip)) {
785 DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
786 drm_rect_debug_print("dst: ", dst, false);
787 drm_rect_debug_print("clip: ", &clip, false);
788 return -EINVAL;
789 }
790
791 return 0;
792 }
793 EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
794
795 /**
796 * drm_atomic_helper_check_planes - validate state object for planes changes
797 * @dev: DRM device
798 * @state: the driver state object
799 *
800 * Check the state object to see if the requested state is physically possible.
801 * This does all the plane update related checks using by calling into the
802 * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
803 * hooks provided by the driver.
804 *
805 * It also sets &drm_crtc_state.planes_changed to indicate that a crtc has
806 * updated planes.
807 *
808 * RETURNS:
809 * Zero for success or -errno
810 */
811 int
812 drm_atomic_helper_check_planes(struct drm_device *dev,
813 struct drm_atomic_state *state)
814 {
815 struct drm_crtc *crtc;
816 struct drm_crtc_state *new_crtc_state;
817 struct drm_plane *plane;
818 struct drm_plane_state *new_plane_state, *old_plane_state;
819 int i, ret = 0;
820
821 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
822 const struct drm_plane_helper_funcs *funcs;
823
824 WARN_ON(!drm_modeset_is_locked(&plane->mutex));
825
826 funcs = plane->helper_private;
827
828 drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
829
830 if (!funcs || !funcs->atomic_check)
831 continue;
832
833 ret = funcs->atomic_check(plane, new_plane_state);
834 if (ret) {
835 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
836 plane->base.id, plane->name);
837 return ret;
838 }
839 }
840
841 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
842 const struct drm_crtc_helper_funcs *funcs;
843
844 funcs = crtc->helper_private;
845
846 if (!funcs || !funcs->atomic_check)
847 continue;
848
849 ret = funcs->atomic_check(crtc, new_crtc_state);
850 if (ret) {
851 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
852 crtc->base.id, crtc->name);
853 return ret;
854 }
855 }
856
857 return ret;
858 }
859 EXPORT_SYMBOL(drm_atomic_helper_check_planes);
860
861 /**
862 * drm_atomic_helper_check - validate state object
863 * @dev: DRM device
864 * @state: the driver state object
865 *
866 * Check the state object to see if the requested state is physically possible.
867 * Only crtcs and planes have check callbacks, so for any additional (global)
868 * checking that a driver needs it can simply wrap that around this function.
869 * Drivers without such needs can directly use this as their
870 * &drm_mode_config_funcs.atomic_check callback.
871 *
872 * This just wraps the two parts of the state checking for planes and modeset
873 * state in the default order: First it calls drm_atomic_helper_check_modeset()
874 * and then drm_atomic_helper_check_planes(). The assumption is that the
875 * @drm_plane_helper_funcs.atomic_check and @drm_crtc_helper_funcs.atomic_check
876 * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
877 * watermarks.
878 *
879 * Note that zpos normalization will add all enable planes to the state which
880 * might not desired for some drivers.
881 * For example enable/disable of a cursor plane which have fixed zpos value
882 * would trigger all other enabled planes to be forced to the state change.
883 *
884 * RETURNS:
885 * Zero for success or -errno
886 */
887 int drm_atomic_helper_check(struct drm_device *dev,
888 struct drm_atomic_state *state)
889 {
890 int ret;
891
892 ret = drm_atomic_helper_check_modeset(dev, state);
893 if (ret)
894 return ret;
895
896 if (dev->mode_config.normalize_zpos) {
897 ret = drm_atomic_normalize_zpos(dev, state);
898 if (ret)
899 return ret;
900 }
901
902 ret = drm_atomic_helper_check_planes(dev, state);
903 if (ret)
904 return ret;
905
906 if (state->legacy_cursor_update)
907 state->async_update = !drm_atomic_helper_async_check(dev, state);
908
909 return ret;
910 }
911 EXPORT_SYMBOL(drm_atomic_helper_check);
912
913 static void
914 disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
915 {
916 struct drm_connector *connector;
917 struct drm_connector_state *old_conn_state, *new_conn_state;
918 struct drm_crtc *crtc;
919 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
920 int i;
921
922 for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
923 const struct drm_encoder_helper_funcs *funcs;
924 struct drm_encoder *encoder;
925
926 /* Shut down everything that's in the changeset and currently
927 * still on. So need to check the old, saved state. */
928 if (!old_conn_state->crtc)
929 continue;
930
931 old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_conn_state->crtc);
932
933 if (!old_crtc_state->active ||
934 !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
935 continue;
936
937 encoder = old_conn_state->best_encoder;
938
939 /* We shouldn't get this far if we didn't previously have
940 * an encoder.. but WARN_ON() rather than explode.
941 */
942 if (WARN_ON(!encoder))
943 continue;
944
945 funcs = encoder->helper_private;
946
947 DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
948 encoder->base.id, encoder->name);
949
950 /*
951 * Each encoder has at most one connector (since we always steal
952 * it away), so we won't call disable hooks twice.
953 */
954 drm_bridge_disable(encoder->bridge);
955
956 /* Right function depends upon target state. */
957 if (funcs) {
958 if (new_conn_state->crtc && funcs->prepare)
959 funcs->prepare(encoder);
960 else if (funcs->disable)
961 funcs->disable(encoder);
962 else if (funcs->dpms)
963 funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
964 }
965
966 drm_bridge_post_disable(encoder->bridge);
967 }
968
969 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
970 const struct drm_crtc_helper_funcs *funcs;
971 int ret;
972
973 /* Shut down everything that needs a full modeset. */
974 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
975 continue;
976
977 if (!old_crtc_state->active)
978 continue;
979
980 funcs = crtc->helper_private;
981
982 DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n",
983 crtc->base.id, crtc->name);
984
985
986 /* Right function depends upon target state. */
987 if (new_crtc_state->enable && funcs->prepare)
988 funcs->prepare(crtc);
989 else if (funcs->atomic_disable)
990 funcs->atomic_disable(crtc, old_crtc_state);
991 else if (funcs->disable)
992 funcs->disable(crtc);
993 else
994 funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
995
996 if (!(dev->irq_enabled && dev->num_crtcs))
997 continue;
998
999 ret = drm_crtc_vblank_get(crtc);
1000 WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
1001 if (ret == 0)
1002 drm_crtc_vblank_put(crtc);
1003 }
1004 }
1005
1006 /**
1007 * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
1008 * @dev: DRM device
1009 * @old_state: atomic state object with old state structures
1010 *
1011 * This function updates all the various legacy modeset state pointers in
1012 * connectors, encoders and crtcs. It also updates the timestamping constants
1013 * used for precise vblank timestamps by calling
1014 * drm_calc_timestamping_constants().
1015 *
1016 * Drivers can use this for building their own atomic commit if they don't have
1017 * a pure helper-based modeset implementation.
1018 *
1019 * Since these updates are not synchronized with lockings, only code paths
1020 * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
1021 * legacy state filled out by this helper. Defacto this means this helper and
1022 * the legacy state pointers are only really useful for transitioning an
1023 * existing driver to the atomic world.
1024 */
1025 void
1026 drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
1027 struct drm_atomic_state *old_state)
1028 {
1029 struct drm_connector *connector;
1030 struct drm_connector_state *old_conn_state, *new_conn_state;
1031 struct drm_crtc *crtc;
1032 struct drm_crtc_state *new_crtc_state;
1033 int i;
1034
1035 /* clear out existing links and update dpms */
1036 for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
1037 if (connector->encoder) {
1038 WARN_ON(!connector->encoder->crtc);
1039
1040 connector->encoder->crtc = NULL;
1041 connector->encoder = NULL;
1042 }
1043
1044 crtc = new_conn_state->crtc;
1045 if ((!crtc && old_conn_state->crtc) ||
1046 (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
1047 int mode = DRM_MODE_DPMS_OFF;
1048
1049 if (crtc && crtc->state->active)
1050 mode = DRM_MODE_DPMS_ON;
1051
1052 connector->dpms = mode;
1053 }
1054 }
1055
1056 /* set new links */
1057 for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1058 if (!new_conn_state->crtc)
1059 continue;
1060
1061 if (WARN_ON(!new_conn_state->best_encoder))
1062 continue;
1063
1064 connector->encoder = new_conn_state->best_encoder;
1065 connector->encoder->crtc = new_conn_state->crtc;
1066 }
1067
1068 /* set legacy state in the crtc structure */
1069 for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
1070 struct drm_plane *primary = crtc->primary;
1071 struct drm_plane_state *new_plane_state;
1072
1073 crtc->mode = new_crtc_state->mode;
1074 crtc->enabled = new_crtc_state->enable;
1075
1076 new_plane_state =
1077 drm_atomic_get_new_plane_state(old_state, primary);
1078
1079 if (new_plane_state && new_plane_state->crtc == crtc) {
1080 crtc->x = new_plane_state->src_x >> 16;
1081 crtc->y = new_plane_state->src_y >> 16;
1082 }
1083
1084 if (new_crtc_state->enable)
1085 drm_calc_timestamping_constants(crtc,
1086 &new_crtc_state->adjusted_mode);
1087 }
1088 }
1089 EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
1090
1091 static void
1092 crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
1093 {
1094 struct drm_crtc *crtc;
1095 struct drm_crtc_state *new_crtc_state;
1096 struct drm_connector *connector;
1097 struct drm_connector_state *new_conn_state;
1098 int i;
1099
1100 for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
1101 const struct drm_crtc_helper_funcs *funcs;
1102
1103 if (!new_crtc_state->mode_changed)
1104 continue;
1105
1106 funcs = crtc->helper_private;
1107
1108 if (new_crtc_state->enable && funcs->mode_set_nofb) {
1109 DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n",
1110 crtc->base.id, crtc->name);
1111
1112 funcs->mode_set_nofb(crtc);
1113 }
1114 }
1115
1116 for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1117 const struct drm_encoder_helper_funcs *funcs;
1118 struct drm_encoder *encoder;
1119 struct drm_display_mode *mode, *adjusted_mode;
1120
1121 if (!new_conn_state->best_encoder)
1122 continue;
1123
1124 encoder = new_conn_state->best_encoder;
1125 funcs = encoder->helper_private;
1126 new_crtc_state = new_conn_state->crtc->state;
1127 mode = &new_crtc_state->mode;
1128 adjusted_mode = &new_crtc_state->adjusted_mode;
1129
1130 if (!new_crtc_state->mode_changed)
1131 continue;
1132
1133 DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
1134 encoder->base.id, encoder->name);
1135
1136 /*
1137 * Each encoder has at most one connector (since we always steal
1138 * it away), so we won't call mode_set hooks twice.
1139 */
1140 if (funcs && funcs->atomic_mode_set) {
1141 funcs->atomic_mode_set(encoder, new_crtc_state,
1142 new_conn_state);
1143 } else if (funcs && funcs->mode_set) {
1144 funcs->mode_set(encoder, mode, adjusted_mode);
1145 }
1146
1147 drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
1148 }
1149 }
1150
1151 /**
1152 * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
1153 * @dev: DRM device
1154 * @old_state: atomic state object with old state structures
1155 *
1156 * This function shuts down all the outputs that need to be shut down and
1157 * prepares them (if required) with the new mode.
1158 *
1159 * For compatibility with legacy crtc helpers this should be called before
1160 * drm_atomic_helper_commit_planes(), which is what the default commit function
1161 * does. But drivers with different needs can group the modeset commits together
1162 * and do the plane commits at the end. This is useful for drivers doing runtime
1163 * PM since planes updates then only happen when the CRTC is actually enabled.
1164 */
1165 void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
1166 struct drm_atomic_state *old_state)
1167 {
1168 disable_outputs(dev, old_state);
1169
1170 drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
1171
1172 crtc_set_mode(dev, old_state);
1173 }
1174 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
1175
1176 static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
1177 struct drm_atomic_state *old_state)
1178 {
1179 struct drm_connector *connector;
1180 struct drm_connector_state *new_conn_state;
1181 int i;
1182
1183 for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1184 const struct drm_connector_helper_funcs *funcs;
1185
1186 funcs = connector->helper_private;
1187 if (!funcs->atomic_commit)
1188 continue;
1189
1190 if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
1191 WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
1192 funcs->atomic_commit(connector, new_conn_state);
1193 }
1194 }
1195 }
1196
1197 /**
1198 * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
1199 * @dev: DRM device
1200 * @old_state: atomic state object with old state structures
1201 *
1202 * This function enables all the outputs with the new configuration which had to
1203 * be turned off for the update.
1204 *
1205 * For compatibility with legacy crtc helpers this should be called after
1206 * drm_atomic_helper_commit_planes(), which is what the default commit function
1207 * does. But drivers with different needs can group the modeset commits together
1208 * and do the plane commits at the end. This is useful for drivers doing runtime
1209 * PM since planes updates then only happen when the CRTC is actually enabled.
1210 */
1211 void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
1212 struct drm_atomic_state *old_state)
1213 {
1214 struct drm_crtc *crtc;
1215 struct drm_crtc_state *old_crtc_state;
1216 struct drm_crtc_state *new_crtc_state;
1217 struct drm_connector *connector;
1218 struct drm_connector_state *new_conn_state;
1219 int i;
1220
1221 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
1222 const struct drm_crtc_helper_funcs *funcs;
1223
1224 /* Need to filter out CRTCs where only planes change. */
1225 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
1226 continue;
1227
1228 if (!new_crtc_state->active)
1229 continue;
1230
1231 funcs = crtc->helper_private;
1232
1233 if (new_crtc_state->enable) {
1234 DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
1235 crtc->base.id, crtc->name);
1236
1237 if (funcs->atomic_enable)
1238 funcs->atomic_enable(crtc, old_crtc_state);
1239 else
1240 funcs->commit(crtc);
1241 }
1242 }
1243
1244 for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1245 const struct drm_encoder_helper_funcs *funcs;
1246 struct drm_encoder *encoder;
1247
1248 if (!new_conn_state->best_encoder)
1249 continue;
1250
1251 if (!new_conn_state->crtc->state->active ||
1252 !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
1253 continue;
1254
1255 encoder = new_conn_state->best_encoder;
1256 funcs = encoder->helper_private;
1257
1258 DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
1259 encoder->base.id, encoder->name);
1260
1261 /*
1262 * Each encoder has at most one connector (since we always steal
1263 * it away), so we won't call enable hooks twice.
1264 */
1265 drm_bridge_pre_enable(encoder->bridge);
1266
1267 if (funcs) {
1268 if (funcs->enable)
1269 funcs->enable(encoder);
1270 else if (funcs->commit)
1271 funcs->commit(encoder);
1272 }
1273
1274 drm_bridge_enable(encoder->bridge);
1275 }
1276
1277 drm_atomic_helper_commit_writebacks(dev, old_state);
1278 }
1279 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
1280
1281 /**
1282 * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
1283 * @dev: DRM device
1284 * @state: atomic state object with old state structures
1285 * @pre_swap: If true, do an interruptible wait, and @state is the new state.
1286 * Otherwise @state is the old state.
1287 *
1288 * For implicit sync, driver should fish the exclusive fence out from the
1289 * incoming fb's and stash it in the drm_plane_state. This is called after
1290 * drm_atomic_helper_swap_state() so it uses the current plane state (and
1291 * just uses the atomic state to find the changed planes)
1292 *
1293 * Note that @pre_swap is needed since the point where we block for fences moves
1294 * around depending upon whether an atomic commit is blocking or
1295 * non-blocking. For non-blocking commit all waiting needs to happen after
1296 * drm_atomic_helper_swap_state() is called, but for blocking commits we want
1297 * to wait **before** we do anything that can't be easily rolled back. That is
1298 * before we call drm_atomic_helper_swap_state().
1299 *
1300 * Returns zero if success or < 0 if dma_fence_wait() fails.
1301 */
1302 int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
1303 struct drm_atomic_state *state,
1304 bool pre_swap)
1305 {
1306 struct drm_plane *plane;
1307 struct drm_plane_state *new_plane_state;
1308 int i, ret;
1309
1310 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
1311 if (!new_plane_state->fence)
1312 continue;
1313
1314 WARN_ON(!new_plane_state->fb);
1315
1316 /*
1317 * If waiting for fences pre-swap (ie: nonblock), userspace can
1318 * still interrupt the operation. Instead of blocking until the
1319 * timer expires, make the wait interruptible.
1320 */
1321 ret = dma_fence_wait(new_plane_state->fence, pre_swap);
1322 if (ret)
1323 return ret;
1324
1325 dma_fence_put(new_plane_state->fence);
1326 new_plane_state->fence = NULL;
1327 }
1328
1329 return 0;
1330 }
1331 EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
1332
1333 /**
1334 * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
1335 * @dev: DRM device
1336 * @old_state: atomic state object with old state structures
1337 *
1338 * Helper to, after atomic commit, wait for vblanks on all effected
1339 * crtcs (ie. before cleaning up old framebuffers using
1340 * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
1341 * framebuffers have actually changed to optimize for the legacy cursor and
1342 * plane update use-case.
1343 *
1344 * Drivers using the nonblocking commit tracking support initialized by calling
1345 * drm_atomic_helper_setup_commit() should look at
1346 * drm_atomic_helper_wait_for_flip_done() as an alternative.
1347 */
1348 void
1349 drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
1350 struct drm_atomic_state *old_state)
1351 {
1352 struct drm_crtc *crtc;
1353 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1354 int i, ret;
1355 unsigned crtc_mask = 0;
1356
1357 /*
1358 * Legacy cursor ioctls are completely unsynced, and userspace
1359 * relies on that (by doing tons of cursor updates).
1360 */
1361 if (old_state->legacy_cursor_update)
1362 return;
1363
1364 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
1365 if (!new_crtc_state->active)
1366 continue;
1367
1368 ret = drm_crtc_vblank_get(crtc);
1369 if (ret != 0)
1370 continue;
1371
1372 crtc_mask |= drm_crtc_mask(crtc);
1373 old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
1374 }
1375
1376 for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
1377 if (!(crtc_mask & drm_crtc_mask(crtc)))
1378 continue;
1379
1380 ret = wait_event_timeout(dev->vblank[i].queue,
1381 old_state->crtcs[i].last_vblank_count !=
1382 drm_crtc_vblank_count(crtc),
1383 msecs_to_jiffies(50));
1384
1385 WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
1386 crtc->base.id, crtc->name);
1387
1388 drm_crtc_vblank_put(crtc);
1389 }
1390 }
1391 EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
1392
1393 /**
1394 * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
1395 * @dev: DRM device
1396 * @old_state: atomic state object with old state structures
1397 *
1398 * Helper to, after atomic commit, wait for page flips on all effected
1399 * crtcs (ie. before cleaning up old framebuffers using
1400 * drm_atomic_helper_cleanup_planes()). Compared to
1401 * drm_atomic_helper_wait_for_vblanks() this waits for the completion of on all
1402 * CRTCs, assuming that cursors-only updates are signalling their completion
1403 * immediately (or using a different path).
1404 *
1405 * This requires that drivers use the nonblocking commit tracking support
1406 * initialized using drm_atomic_helper_setup_commit().
1407 */
1408 void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
1409 struct drm_atomic_state *old_state)
1410 {
1411 struct drm_crtc *crtc;
1412 int i;
1413
1414 for (i = 0; i < dev->mode_config.num_crtc; i++) {
1415 struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
1416 int ret;
1417
1418 crtc = old_state->crtcs[i].ptr;
1419
1420 if (!crtc || !commit)
1421 continue;
1422
1423 ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
1424 if (ret == 0)
1425 DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
1426 crtc->base.id, crtc->name);
1427 }
1428 }
1429 EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
1430
1431 /**
1432 * drm_atomic_helper_commit_tail - commit atomic update to hardware
1433 * @old_state: atomic state object with old state structures
1434 *
1435 * This is the default implementation for the
1436 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1437 * that do not support runtime_pm or do not need the CRTC to be
1438 * enabled to perform a commit. Otherwise, see
1439 * drm_atomic_helper_commit_tail_rpm().
1440 *
1441 * Note that the default ordering of how the various stages are called is to
1442 * match the legacy modeset helper library closest.
1443 */
1444 void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
1445 {
1446 struct drm_device *dev = old_state->dev;
1447
1448 drm_atomic_helper_commit_modeset_disables(dev, old_state);
1449
1450 drm_atomic_helper_commit_planes(dev, old_state, 0);
1451
1452 drm_atomic_helper_commit_modeset_enables(dev, old_state);
1453
1454 drm_atomic_helper_fake_vblank(old_state);
1455
1456 drm_atomic_helper_commit_hw_done(old_state);
1457
1458 drm_atomic_helper_wait_for_vblanks(dev, old_state);
1459
1460 drm_atomic_helper_cleanup_planes(dev, old_state);
1461 }
1462 EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
1463
1464 /**
1465 * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
1466 * @old_state: new modeset state to be committed
1467 *
1468 * This is an alternative implementation for the
1469 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1470 * that support runtime_pm or need the CRTC to be enabled to perform a
1471 * commit. Otherwise, one should use the default implementation
1472 * drm_atomic_helper_commit_tail().
1473 */
1474 void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
1475 {
1476 struct drm_device *dev = old_state->dev;
1477
1478 drm_atomic_helper_commit_modeset_disables(dev, old_state);
1479
1480 drm_atomic_helper_commit_modeset_enables(dev, old_state);
1481
1482 drm_atomic_helper_commit_planes(dev, old_state,
1483 DRM_PLANE_COMMIT_ACTIVE_ONLY);
1484
1485 drm_atomic_helper_fake_vblank(old_state);
1486
1487 drm_atomic_helper_commit_hw_done(old_state);
1488
1489 drm_atomic_helper_wait_for_vblanks(dev, old_state);
1490
1491 drm_atomic_helper_cleanup_planes(dev, old_state);
1492 }
1493 EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
1494
1495 static void commit_tail(struct drm_atomic_state *old_state)
1496 {
1497 struct drm_device *dev = old_state->dev;
1498 const struct drm_mode_config_helper_funcs *funcs;
1499
1500 funcs = dev->mode_config.helper_private;
1501
1502 drm_atomic_helper_wait_for_fences(dev, old_state, false);
1503
1504 drm_atomic_helper_wait_for_dependencies(old_state);
1505
1506 if (funcs && funcs->atomic_commit_tail)
1507 funcs->atomic_commit_tail(old_state);
1508 else
1509 drm_atomic_helper_commit_tail(old_state);
1510
1511 drm_atomic_helper_commit_cleanup_done(old_state);
1512
1513 drm_atomic_state_put(old_state);
1514 }
1515
1516 static void commit_work(struct work_struct *work)
1517 {
1518 struct drm_atomic_state *state = container_of(work,
1519 struct drm_atomic_state,
1520 commit_work);
1521 commit_tail(state);
1522 }
1523
1524 /**
1525 * drm_atomic_helper_async_check - check if state can be commited asynchronously
1526 * @dev: DRM device
1527 * @state: the driver state object
1528 *
1529 * This helper will check if it is possible to commit the state asynchronously.
1530 * Async commits are not supposed to swap the states like normal sync commits
1531 * but just do in-place changes on the current state.
1532 *
1533 * It will return 0 if the commit can happen in an asynchronous fashion or error
1534 * if not. Note that error just mean it can't be commited asynchronously, if it
1535 * fails the commit should be treated like a normal synchronous commit.
1536 */
1537 int drm_atomic_helper_async_check(struct drm_device *dev,
1538 struct drm_atomic_state *state)
1539 {
1540 struct drm_crtc *crtc;
1541 struct drm_crtc_state *crtc_state;
1542 struct drm_plane *plane = NULL;
1543 struct drm_plane_state *old_plane_state = NULL;
1544 struct drm_plane_state *new_plane_state = NULL;
1545 const struct drm_plane_helper_funcs *funcs;
1546 int i, n_planes = 0;
1547
1548 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1549 if (drm_atomic_crtc_needs_modeset(crtc_state))
1550 return -EINVAL;
1551 }
1552
1553 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
1554 n_planes++;
1555
1556 /* FIXME: we support only single plane updates for now */
1557 if (n_planes != 1)
1558 return -EINVAL;
1559
1560 if (!new_plane_state->crtc ||
1561 old_plane_state->crtc != new_plane_state->crtc)
1562 return -EINVAL;
1563
1564 funcs = plane->helper_private;
1565 if (!funcs->atomic_async_update)
1566 return -EINVAL;
1567
1568 if (new_plane_state->fence)
1569 return -EINVAL;
1570
1571 /*
1572 * Don't do an async update if there is an outstanding commit modifying
1573 * the plane. This prevents our async update's changes from getting
1574 * overridden by a previous synchronous update's state.
1575 */
1576 if (old_plane_state->commit &&
1577 !try_wait_for_completion(&old_plane_state->commit->hw_done))
1578 return -EBUSY;
1579
1580 return funcs->atomic_async_check(plane, new_plane_state);
1581 }
1582 EXPORT_SYMBOL(drm_atomic_helper_async_check);
1583
1584 /**
1585 * drm_atomic_helper_async_commit - commit state asynchronously
1586 * @dev: DRM device
1587 * @state: the driver state object
1588 *
1589 * This function commits a state asynchronously, i.e., not vblank
1590 * synchronized. It should be used on a state only when
1591 * drm_atomic_async_check() succeeds. Async commits are not supposed to swap
1592 * the states like normal sync commits, but just do in-place changes on the
1593 * current state.
1594 */
1595 void drm_atomic_helper_async_commit(struct drm_device *dev,
1596 struct drm_atomic_state *state)
1597 {
1598 struct drm_plane *plane;
1599 struct drm_plane_state *plane_state;
1600 const struct drm_plane_helper_funcs *funcs;
1601 int i;
1602
1603 for_each_new_plane_in_state(state, plane, plane_state, i) {
1604 funcs = plane->helper_private;
1605 funcs->atomic_async_update(plane, plane_state);
1606
1607 /*
1608 * ->atomic_async_update() is supposed to update the
1609 * plane->state in-place, make sure at least common
1610 * properties have been properly updated.
1611 */
1612 WARN_ON_ONCE(plane->state->fb != plane_state->fb);
1613 WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
1614 WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
1615 WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
1616 WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
1617 }
1618 }
1619 EXPORT_SYMBOL(drm_atomic_helper_async_commit);
1620
1621 /**
1622 * drm_atomic_helper_commit - commit validated state object
1623 * @dev: DRM device
1624 * @state: the driver state object
1625 * @nonblock: whether nonblocking behavior is requested.
1626 *
1627 * This function commits a with drm_atomic_helper_check() pre-validated state
1628 * object. This can still fail when e.g. the framebuffer reservation fails. This
1629 * function implements nonblocking commits, using
1630 * drm_atomic_helper_setup_commit() and related functions.
1631 *
1632 * Committing the actual hardware state is done through the
1633 * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or it's default
1634 * implementation drm_atomic_helper_commit_tail().
1635 *
1636 * RETURNS:
1637 * Zero for success or -errno.
1638 */
1639 int drm_atomic_helper_commit(struct drm_device *dev,
1640 struct drm_atomic_state *state,
1641 bool nonblock)
1642 {
1643 int ret;
1644
1645 if (state->async_update) {
1646 ret = drm_atomic_helper_prepare_planes(dev, state);
1647 if (ret)
1648 return ret;
1649
1650 drm_atomic_helper_async_commit(dev, state);
1651 drm_atomic_helper_cleanup_planes(dev, state);
1652
1653 return 0;
1654 }
1655
1656 ret = drm_atomic_helper_setup_commit(state, nonblock);
1657 if (ret)
1658 return ret;
1659
1660 INIT_WORK(&state->commit_work, commit_work);
1661
1662 ret = drm_atomic_helper_prepare_planes(dev, state);
1663 if (ret)
1664 return ret;
1665
1666 if (!nonblock) {
1667 ret = drm_atomic_helper_wait_for_fences(dev, state, true);
1668 if (ret)
1669 goto err;
1670 }
1671
1672 /*
1673 * This is the point of no return - everything below never fails except
1674 * when the hw goes bonghits. Which means we can commit the new state on
1675 * the software side now.
1676 */
1677
1678 ret = drm_atomic_helper_swap_state(state, true);
1679 if (ret)
1680 goto err;
1681
1682 /*
1683 * Everything below can be run asynchronously without the need to grab
1684 * any modeset locks at all under one condition: It must be guaranteed
1685 * that the asynchronous work has either been cancelled (if the driver
1686 * supports it, which at least requires that the framebuffers get
1687 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
1688 * before the new state gets committed on the software side with
1689 * drm_atomic_helper_swap_state().
1690 *
1691 * This scheme allows new atomic state updates to be prepared and
1692 * checked in parallel to the asynchronous completion of the previous
1693 * update. Which is important since compositors need to figure out the
1694 * composition of the next frame right after having submitted the
1695 * current layout.
1696 *
1697 * NOTE: Commit work has multiple phases, first hardware commit, then
1698 * cleanup. We want them to overlap, hence need system_unbound_wq to
1699 * make sure work items don't artifically stall on each another.
1700 */
1701
1702 drm_atomic_state_get(state);
1703 if (nonblock)
1704 queue_work(system_unbound_wq, &state->commit_work);
1705 else
1706 commit_tail(state);
1707
1708 return 0;
1709
1710 err:
1711 drm_atomic_helper_cleanup_planes(dev, state);
1712 return ret;
1713 }
1714 EXPORT_SYMBOL(drm_atomic_helper_commit);
1715
1716 /**
1717 * DOC: implementing nonblocking commit
1718 *
1719 * Nonblocking atomic commits have to be implemented in the following sequence:
1720 *
1721 * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
1722 * which commit needs to call which can fail, so we want to run it first and
1723 * synchronously.
1724 *
1725 * 2. Synchronize with any outstanding nonblocking commit worker threads which
1726 * might be affected the new state update. This can be done by either cancelling
1727 * or flushing the work items, depending upon whether the driver can deal with
1728 * cancelled updates. Note that it is important to ensure that the framebuffer
1729 * cleanup is still done when cancelling.
1730 *
1731 * Asynchronous workers need to have sufficient parallelism to be able to run
1732 * different atomic commits on different CRTCs in parallel. The simplest way to
1733 * achive this is by running them on the &system_unbound_wq work queue. Note
1734 * that drivers are not required to split up atomic commits and run an
1735 * individual commit in parallel - userspace is supposed to do that if it cares.
1736 * But it might be beneficial to do that for modesets, since those necessarily
1737 * must be done as one global operation, and enabling or disabling a CRTC can
1738 * take a long time. But even that is not required.
1739 *
1740 * 3. The software state is updated synchronously with
1741 * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
1742 * locks means concurrent callers never see inconsistent state. And doing this
1743 * while it's guaranteed that no relevant nonblocking worker runs means that
1744 * nonblocking workers do not need grab any locks. Actually they must not grab
1745 * locks, for otherwise the work flushing will deadlock.
1746 *
1747 * 4. Schedule a work item to do all subsequent steps, using the split-out
1748 * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
1749 * then cleaning up the framebuffers after the old framebuffer is no longer
1750 * being displayed.
1751 *
1752 * The above scheme is implemented in the atomic helper libraries in
1753 * drm_atomic_helper_commit() using a bunch of helper functions. See
1754 * drm_atomic_helper_setup_commit() for a starting point.
1755 */
1756
1757 static int stall_checks(struct drm_crtc *crtc, bool nonblock)
1758 {
1759 struct drm_crtc_commit *commit, *stall_commit = NULL;
1760 bool completed = true;
1761 int i;
1762 long ret = 0;
1763
1764 spin_lock(&crtc->commit_lock);
1765 i = 0;
1766 list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
1767 if (i == 0) {
1768 completed = try_wait_for_completion(&commit->flip_done);
1769 /* Userspace is not allowed to get ahead of the previous
1770 * commit with nonblocking ones. */
1771 if (!completed && nonblock) {
1772 spin_unlock(&crtc->commit_lock);
1773 return -EBUSY;
1774 }
1775 } else if (i == 1) {
1776 stall_commit = drm_crtc_commit_get(commit);
1777 break;
1778 }
1779
1780 i++;
1781 }
1782 spin_unlock(&crtc->commit_lock);
1783
1784 if (!stall_commit)
1785 return 0;
1786
1787 /* We don't want to let commits get ahead of cleanup work too much,
1788 * stalling on 2nd previous commit means triple-buffer won't ever stall.
1789 */
1790 ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
1791 10*HZ);
1792 if (ret == 0)
1793 DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n",
1794 crtc->base.id, crtc->name);
1795
1796 drm_crtc_commit_put(stall_commit);
1797
1798 return ret < 0 ? ret : 0;
1799 }
1800
1801 static void release_crtc_commit(struct completion *completion)
1802 {
1803 struct drm_crtc_commit *commit = container_of(completion,
1804 typeof(*commit),
1805 flip_done);
1806
1807 drm_crtc_commit_put(commit);
1808 }
1809
1810 static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
1811 {
1812 init_completion(&commit->flip_done);
1813 init_completion(&commit->hw_done);
1814 init_completion(&commit->cleanup_done);
1815 INIT_LIST_HEAD(&commit->commit_entry);
1816 kref_init(&commit->ref);
1817 commit->crtc = crtc;
1818 }
1819
1820 static struct drm_crtc_commit *
1821 crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
1822 {
1823 if (crtc) {
1824 struct drm_crtc_state *new_crtc_state;
1825
1826 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1827
1828 return new_crtc_state->commit;
1829 }
1830
1831 if (!state->fake_commit) {
1832 state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL);
1833 if (!state->fake_commit)
1834 return NULL;
1835
1836 init_commit(state->fake_commit, NULL);
1837 }
1838
1839 return state->fake_commit;
1840 }
1841
1842 /**
1843 * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
1844 * @state: new modeset state to be committed
1845 * @nonblock: whether nonblocking behavior is requested.
1846 *
1847 * This function prepares @state to be used by the atomic helper's support for
1848 * nonblocking commits. Drivers using the nonblocking commit infrastructure
1849 * should always call this function from their
1850 * &drm_mode_config_funcs.atomic_commit hook.
1851 *
1852 * To be able to use this support drivers need to use a few more helper
1853 * functions. drm_atomic_helper_wait_for_dependencies() must be called before
1854 * actually committing the hardware state, and for nonblocking commits this call
1855 * must be placed in the async worker. See also drm_atomic_helper_swap_state()
1856 * and it's stall parameter, for when a driver's commit hooks look at the
1857 * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
1858 *
1859 * Completion of the hardware commit step must be signalled using
1860 * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
1861 * to read or change any permanent software or hardware modeset state. The only
1862 * exception is state protected by other means than &drm_modeset_lock locks.
1863 * Only the free standing @state with pointers to the old state structures can
1864 * be inspected, e.g. to clean up old buffers using
1865 * drm_atomic_helper_cleanup_planes().
1866 *
1867 * At the very end, before cleaning up @state drivers must call
1868 * drm_atomic_helper_commit_cleanup_done().
1869 *
1870 * This is all implemented by in drm_atomic_helper_commit(), giving drivers a
1871 * complete and easy-to-use default implementation of the atomic_commit() hook.
1872 *
1873 * The tracking of asynchronously executed and still pending commits is done
1874 * using the core structure &drm_crtc_commit.
1875 *
1876 * By default there's no need to clean up resources allocated by this function
1877 * explicitly: drm_atomic_state_default_clear() will take care of that
1878 * automatically.
1879 *
1880 * Returns:
1881 *
1882 * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
1883 * -ENOMEM on allocation failures and -EINTR when a signal is pending.
1884 */
1885 int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
1886 bool nonblock)
1887 {
1888 struct drm_crtc *crtc;
1889 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1890 struct drm_connector *conn;
1891 struct drm_connector_state *old_conn_state, *new_conn_state;
1892 struct drm_plane *plane;
1893 struct drm_plane_state *old_plane_state, *new_plane_state;
1894 struct drm_crtc_commit *commit;
1895 int i, ret;
1896
1897 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1898 commit = kzalloc(sizeof(*commit), GFP_KERNEL);
1899 if (!commit)
1900 return -ENOMEM;
1901
1902 init_commit(commit, crtc);
1903
1904 new_crtc_state->commit = commit;
1905
1906 ret = stall_checks(crtc, nonblock);
1907 if (ret)
1908 return ret;
1909
1910 /* Drivers only send out events when at least either current or
1911 * new CRTC state is active. Complete right away if everything
1912 * stays off. */
1913 if (!old_crtc_state->active && !new_crtc_state->active) {
1914 complete_all(&commit->flip_done);
1915 continue;
1916 }
1917
1918 /* Legacy cursor updates are fully unsynced. */
1919 if (state->legacy_cursor_update) {
1920 complete_all(&commit->flip_done);
1921 continue;
1922 }
1923
1924 if (!new_crtc_state->event) {
1925 commit->event = kzalloc(sizeof(*commit->event),
1926 GFP_KERNEL);
1927 if (!commit->event)
1928 return -ENOMEM;
1929
1930 new_crtc_state->event = commit->event;
1931 }
1932
1933 new_crtc_state->event->base.completion = &commit->flip_done;
1934 new_crtc_state->event->base.completion_release = release_crtc_commit;
1935 drm_crtc_commit_get(commit);
1936
1937 commit->abort_completion = true;
1938
1939 state->crtcs[i].commit = commit;
1940 drm_crtc_commit_get(commit);
1941 }
1942
1943 for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
1944 /* Userspace is not allowed to get ahead of the previous
1945 * commit with nonblocking ones. */
1946 if (nonblock && old_conn_state->commit &&
1947 !try_wait_for_completion(&old_conn_state->commit->flip_done))
1948 return -EBUSY;
1949
1950 /* Always track connectors explicitly for e.g. link retraining. */
1951 commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
1952 if (!commit)
1953 return -ENOMEM;
1954
1955 new_conn_state->commit = drm_crtc_commit_get(commit);
1956 }
1957
1958 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
1959 /* Userspace is not allowed to get ahead of the previous
1960 * commit with nonblocking ones. */
1961 if (nonblock && old_plane_state->commit &&
1962 !try_wait_for_completion(&old_plane_state->commit->flip_done))
1963 return -EBUSY;
1964
1965 /* Always track planes explicitly for async pageflip support. */
1966 commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
1967 if (!commit)
1968 return -ENOMEM;
1969
1970 new_plane_state->commit = drm_crtc_commit_get(commit);
1971 }
1972
1973 return 0;
1974 }
1975 EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
1976
1977 /**
1978 * drm_atomic_helper_wait_for_dependencies - wait for required preceeding commits
1979 * @old_state: atomic state object with old state structures
1980 *
1981 * This function waits for all preceeding commits that touch the same CRTC as
1982 * @old_state to both be committed to the hardware (as signalled by
1983 * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
1984 * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
1985 *
1986 * This is part of the atomic helper support for nonblocking commits, see
1987 * drm_atomic_helper_setup_commit() for an overview.
1988 */
1989 void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
1990 {
1991 struct drm_crtc *crtc;
1992 struct drm_crtc_state *old_crtc_state;
1993 struct drm_plane *plane;
1994 struct drm_plane_state *old_plane_state;
1995 struct drm_connector *conn;
1996 struct drm_connector_state *old_conn_state;
1997 struct drm_crtc_commit *commit;
1998 int i;
1999 long ret;
2000
2001 for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
2002 commit = old_crtc_state->commit;
2003
2004 if (!commit)
2005 continue;
2006
2007 ret = wait_for_completion_timeout(&commit->hw_done,
2008 10*HZ);
2009 if (ret == 0)
2010 DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
2011 crtc->base.id, crtc->name);
2012
2013 /* Currently no support for overwriting flips, hence
2014 * stall for previous one to execute completely. */
2015 ret = wait_for_completion_timeout(&commit->flip_done,
2016 10*HZ);
2017 if (ret == 0)
2018 DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
2019 crtc->base.id, crtc->name);
2020 }
2021
2022 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
2023 commit = old_conn_state->commit;
2024
2025 if (!commit)
2026 continue;
2027
2028 ret = wait_for_completion_timeout(&commit->hw_done,
2029 10*HZ);
2030 if (ret == 0)
2031 DRM_ERROR("[CONNECTOR:%d:%s] hw_done timed out\n",
2032 conn->base.id, conn->name);
2033
2034 /* Currently no support for overwriting flips, hence
2035 * stall for previous one to execute completely. */
2036 ret = wait_for_completion_timeout(&commit->flip_done,
2037 10*HZ);
2038 if (ret == 0)
2039 DRM_ERROR("[CONNECTOR:%d:%s] flip_done timed out\n",
2040 conn->base.id, conn->name);
2041 }
2042
2043 for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
2044 commit = old_plane_state->commit;
2045
2046 if (!commit)
2047 continue;
2048
2049 ret = wait_for_completion_timeout(&commit->hw_done,
2050 10*HZ);
2051 if (ret == 0)
2052 DRM_ERROR("[PLANE:%d:%s] hw_done timed out\n",
2053 plane->base.id, plane->name);
2054
2055 /* Currently no support for overwriting flips, hence
2056 * stall for previous one to execute completely. */
2057 ret = wait_for_completion_timeout(&commit->flip_done,
2058 10*HZ);
2059 if (ret == 0)
2060 DRM_ERROR("[PLANE:%d:%s] flip_done timed out\n",
2061 plane->base.id, plane->name);
2062 }
2063 }
2064 EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
2065
2066 /**
2067 * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
2068 * @old_state: atomic state object with old state structures
2069 *
2070 * This function walks all CRTCs and fake VBLANK events on those with
2071 * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
2072 * The primary use of this function is writeback connectors working in oneshot
2073 * mode and faking VBLANK events. In this case they only fake the VBLANK event
2074 * when a job is queued, and any change to the pipeline that does not touch the
2075 * connector is leading to timeouts when calling
2076 * drm_atomic_helper_wait_for_vblanks() or
2077 * drm_atomic_helper_wait_for_flip_done().
2078 *
2079 * This is part of the atomic helper support for nonblocking commits, see
2080 * drm_atomic_helper_setup_commit() for an overview.
2081 */
2082 void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
2083 {
2084 struct drm_crtc_state *new_crtc_state;
2085 struct drm_crtc *crtc;
2086 int i;
2087
2088 for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
2089 unsigned long flags;
2090
2091 if (!new_crtc_state->no_vblank)
2092 continue;
2093
2094 spin_lock_irqsave(&old_state->dev->event_lock, flags);
2095 if (new_crtc_state->event) {
2096 drm_crtc_send_vblank_event(crtc,
2097 new_crtc_state->event);
2098 new_crtc_state->event = NULL;
2099 }
2100 spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
2101 }
2102 }
2103 EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
2104
2105 /**
2106 * drm_atomic_helper_commit_hw_done - setup possible nonblocking commit
2107 * @old_state: atomic state object with old state structures
2108 *
2109 * This function is used to signal completion of the hardware commit step. After
2110 * this step the driver is not allowed to read or change any permanent software
2111 * or hardware modeset state. The only exception is state protected by other
2112 * means than &drm_modeset_lock locks.
2113 *
2114 * Drivers should try to postpone any expensive or delayed cleanup work after
2115 * this function is called.
2116 *
2117 * This is part of the atomic helper support for nonblocking commits, see
2118 * drm_atomic_helper_setup_commit() for an overview.
2119 */
2120 void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
2121 {
2122 struct drm_crtc *crtc;
2123 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2124 struct drm_crtc_commit *commit;
2125 int i;
2126
2127 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2128 commit = new_crtc_state->commit;
2129 if (!commit)
2130 continue;
2131
2132 /*
2133 * copy new_crtc_state->commit to old_crtc_state->commit,
2134 * it's unsafe to touch new_crtc_state after hw_done,
2135 * but we still need to do so in cleanup_done().
2136 */
2137 if (old_crtc_state->commit)
2138 drm_crtc_commit_put(old_crtc_state->commit);
2139
2140 old_crtc_state->commit = drm_crtc_commit_get(commit);
2141
2142 /* backend must have consumed any event by now */
2143 WARN_ON(new_crtc_state->event);
2144 complete_all(&commit->hw_done);
2145 }
2146
2147 if (old_state->fake_commit) {
2148 complete_all(&old_state->fake_commit->hw_done);
2149 complete_all(&old_state->fake_commit->flip_done);
2150 }
2151 }
2152 EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
2153
2154 /**
2155 * drm_atomic_helper_commit_cleanup_done - signal completion of commit
2156 * @old_state: atomic state object with old state structures
2157 *
2158 * This signals completion of the atomic update @old_state, including any
2159 * cleanup work. If used, it must be called right before calling
2160 * drm_atomic_state_put().
2161 *
2162 * This is part of the atomic helper support for nonblocking commits, see
2163 * drm_atomic_helper_setup_commit() for an overview.
2164 */
2165 void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
2166 {
2167 struct drm_crtc *crtc;
2168 struct drm_crtc_state *old_crtc_state;
2169 struct drm_crtc_commit *commit;
2170 int i;
2171
2172 for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
2173 commit = old_crtc_state->commit;
2174 if (WARN_ON(!commit))
2175 continue;
2176
2177 complete_all(&commit->cleanup_done);
2178 WARN_ON(!try_wait_for_completion(&commit->hw_done));
2179
2180 spin_lock(&crtc->commit_lock);
2181 list_del(&commit->commit_entry);
2182 spin_unlock(&crtc->commit_lock);
2183 }
2184
2185 if (old_state->fake_commit)
2186 complete_all(&old_state->fake_commit->cleanup_done);
2187 }
2188 EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
2189
2190 /**
2191 * drm_atomic_helper_prepare_planes - prepare plane resources before commit
2192 * @dev: DRM device
2193 * @state: atomic state object with new state structures
2194 *
2195 * This function prepares plane state, specifically framebuffers, for the new
2196 * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure
2197 * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on
2198 * any already successfully prepared framebuffer.
2199 *
2200 * Returns:
2201 * 0 on success, negative error code on failure.
2202 */
2203 int drm_atomic_helper_prepare_planes(struct drm_device *dev,
2204 struct drm_atomic_state *state)
2205 {
2206 struct drm_plane *plane;
2207 struct drm_plane_state *new_plane_state;
2208 int ret, i, j;
2209
2210 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2211 const struct drm_plane_helper_funcs *funcs;
2212
2213 funcs = plane->helper_private;
2214
2215 if (funcs->prepare_fb) {
2216 ret = funcs->prepare_fb(plane, new_plane_state);
2217 if (ret)
2218 goto fail;
2219 }
2220 }
2221
2222 return 0;
2223
2224 fail:
2225 for_each_new_plane_in_state(state, plane, new_plane_state, j) {
2226 const struct drm_plane_helper_funcs *funcs;
2227
2228 if (j >= i)
2229 continue;
2230
2231 funcs = plane->helper_private;
2232
2233 if (funcs->cleanup_fb)
2234 funcs->cleanup_fb(plane, new_plane_state);
2235 }
2236
2237 return ret;
2238 }
2239 EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
2240
2241 static bool plane_crtc_active(const struct drm_plane_state *state)
2242 {
2243 return state->crtc && state->crtc->state->active;
2244 }
2245
2246 /**
2247 * drm_atomic_helper_commit_planes - commit plane state
2248 * @dev: DRM device
2249 * @old_state: atomic state object with old state structures
2250 * @flags: flags for committing plane state
2251 *
2252 * This function commits the new plane state using the plane and atomic helper
2253 * functions for planes and crtcs. It assumes that the atomic state has already
2254 * been pushed into the relevant object state pointers, since this step can no
2255 * longer fail.
2256 *
2257 * It still requires the global state object @old_state to know which planes and
2258 * crtcs need to be updated though.
2259 *
2260 * Note that this function does all plane updates across all CRTCs in one step.
2261 * If the hardware can't support this approach look at
2262 * drm_atomic_helper_commit_planes_on_crtc() instead.
2263 *
2264 * Plane parameters can be updated by applications while the associated CRTC is
2265 * disabled. The DRM/KMS core will store the parameters in the plane state,
2266 * which will be available to the driver when the CRTC is turned on. As a result
2267 * most drivers don't need to be immediately notified of plane updates for a
2268 * disabled CRTC.
2269 *
2270 * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
2271 * @flags in order not to receive plane update notifications related to a
2272 * disabled CRTC. This avoids the need to manually ignore plane updates in
2273 * driver code when the driver and/or hardware can't or just don't need to deal
2274 * with updates on disabled CRTCs, for example when supporting runtime PM.
2275 *
2276 * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
2277 * display controllers require to disable a CRTC's planes when the CRTC is
2278 * disabled. This function would skip the &drm_plane_helper_funcs.atomic_disable
2279 * call for a plane if the CRTC of the old plane state needs a modesetting
2280 * operation. Of course, the drivers need to disable the planes in their CRTC
2281 * disable callbacks since no one else would do that.
2282 *
2283 * The drm_atomic_helper_commit() default implementation doesn't set the
2284 * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
2285 * This should not be copied blindly by drivers.
2286 */
2287 void drm_atomic_helper_commit_planes(struct drm_device *dev,
2288 struct drm_atomic_state *old_state,
2289 uint32_t flags)
2290 {
2291 struct drm_crtc *crtc;
2292 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2293 struct drm_plane *plane;
2294 struct drm_plane_state *old_plane_state, *new_plane_state;
2295 int i;
2296 bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
2297 bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
2298
2299 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2300 const struct drm_crtc_helper_funcs *funcs;
2301
2302 funcs = crtc->helper_private;
2303
2304 if (!funcs || !funcs->atomic_begin)
2305 continue;
2306
2307 if (active_only && !new_crtc_state->active)
2308 continue;
2309
2310 funcs->atomic_begin(crtc, old_crtc_state);
2311 }
2312
2313 for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
2314 const struct drm_plane_helper_funcs *funcs;
2315 bool disabling;
2316
2317 funcs = plane->helper_private;
2318
2319 if (!funcs)
2320 continue;
2321
2322 disabling = drm_atomic_plane_disabling(old_plane_state,
2323 new_plane_state);
2324
2325 if (active_only) {
2326 /*
2327 * Skip planes related to inactive CRTCs. If the plane
2328 * is enabled use the state of the current CRTC. If the
2329 * plane is being disabled use the state of the old
2330 * CRTC to avoid skipping planes being disabled on an
2331 * active CRTC.
2332 */
2333 if (!disabling && !plane_crtc_active(new_plane_state))
2334 continue;
2335 if (disabling && !plane_crtc_active(old_plane_state))
2336 continue;
2337 }
2338
2339 /*
2340 * Special-case disabling the plane if drivers support it.
2341 */
2342 if (disabling && funcs->atomic_disable) {
2343 struct drm_crtc_state *crtc_state;
2344
2345 crtc_state = old_plane_state->crtc->state;
2346
2347 if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2348 no_disable)
2349 continue;
2350
2351 funcs->atomic_disable(plane, old_plane_state);
2352 } else if (new_plane_state->crtc || disabling) {
2353 funcs->atomic_update(plane, old_plane_state);
2354 }
2355 }
2356
2357 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2358 const struct drm_crtc_helper_funcs *funcs;
2359
2360 funcs = crtc->helper_private;
2361
2362 if (!funcs || !funcs->atomic_flush)
2363 continue;
2364
2365 if (active_only && !new_crtc_state->active)
2366 continue;
2367
2368 funcs->atomic_flush(crtc, old_crtc_state);
2369 }
2370 }
2371 EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
2372
2373 /**
2374 * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a crtc
2375 * @old_crtc_state: atomic state object with the old crtc state
2376 *
2377 * This function commits the new plane state using the plane and atomic helper
2378 * functions for planes on the specific crtc. It assumes that the atomic state
2379 * has already been pushed into the relevant object state pointers, since this
2380 * step can no longer fail.
2381 *
2382 * This function is useful when plane updates should be done crtc-by-crtc
2383 * instead of one global step like drm_atomic_helper_commit_planes() does.
2384 *
2385 * This function can only be savely used when planes are not allowed to move
2386 * between different CRTCs because this function doesn't handle inter-CRTC
2387 * depencies. Callers need to ensure that either no such depencies exist,
2388 * resolve them through ordering of commit calls or through some other means.
2389 */
2390 void
2391 drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
2392 {
2393 const struct drm_crtc_helper_funcs *crtc_funcs;
2394 struct drm_crtc *crtc = old_crtc_state->crtc;
2395 struct drm_atomic_state *old_state = old_crtc_state->state;
2396 struct drm_crtc_state *new_crtc_state =
2397 drm_atomic_get_new_crtc_state(old_state, crtc);
2398 struct drm_plane *plane;
2399 unsigned plane_mask;
2400
2401 plane_mask = old_crtc_state->plane_mask;
2402 plane_mask |= new_crtc_state->plane_mask;
2403
2404 crtc_funcs = crtc->helper_private;
2405 if (crtc_funcs && crtc_funcs->atomic_begin)
2406 crtc_funcs->atomic_begin(crtc, old_crtc_state);
2407
2408 drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
2409 struct drm_plane_state *old_plane_state =
2410 drm_atomic_get_old_plane_state(old_state, plane);
2411 struct drm_plane_state *new_plane_state =
2412 drm_atomic_get_new_plane_state(old_state, plane);
2413 const struct drm_plane_helper_funcs *plane_funcs;
2414
2415 plane_funcs = plane->helper_private;
2416
2417 if (!old_plane_state || !plane_funcs)
2418 continue;
2419
2420 WARN_ON(new_plane_state->crtc &&
2421 new_plane_state->crtc != crtc);
2422
2423 if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
2424 plane_funcs->atomic_disable)
2425 plane_funcs->atomic_disable(plane, old_plane_state);
2426 else if (new_plane_state->crtc ||
2427 drm_atomic_plane_disabling(old_plane_state, new_plane_state))
2428 plane_funcs->atomic_update(plane, old_plane_state);
2429 }
2430
2431 if (crtc_funcs && crtc_funcs->atomic_flush)
2432 crtc_funcs->atomic_flush(crtc, old_crtc_state);
2433 }
2434 EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
2435
2436 /**
2437 * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
2438 * @old_crtc_state: atomic state object with the old CRTC state
2439 * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
2440 *
2441 * Disables all planes associated with the given CRTC. This can be
2442 * used for instance in the CRTC helper atomic_disable callback to disable
2443 * all planes.
2444 *
2445 * If the atomic-parameter is set the function calls the CRTC's
2446 * atomic_begin hook before and atomic_flush hook after disabling the
2447 * planes.
2448 *
2449 * It is a bug to call this function without having implemented the
2450 * &drm_plane_helper_funcs.atomic_disable plane hook.
2451 */
2452 void
2453 drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
2454 bool atomic)
2455 {
2456 struct drm_crtc *crtc = old_crtc_state->crtc;
2457 const struct drm_crtc_helper_funcs *crtc_funcs =
2458 crtc->helper_private;
2459 struct drm_plane *plane;
2460
2461 if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
2462 crtc_funcs->atomic_begin(crtc, NULL);
2463
2464 drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
2465 const struct drm_plane_helper_funcs *plane_funcs =
2466 plane->helper_private;
2467
2468 if (!plane_funcs)
2469 continue;
2470
2471 WARN_ON(!plane_funcs->atomic_disable);
2472 if (plane_funcs->atomic_disable)
2473 plane_funcs->atomic_disable(plane, NULL);
2474 }
2475
2476 if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
2477 crtc_funcs->atomic_flush(crtc, NULL);
2478 }
2479 EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
2480
2481 /**
2482 * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
2483 * @dev: DRM device
2484 * @old_state: atomic state object with old state structures
2485 *
2486 * This function cleans up plane state, specifically framebuffers, from the old
2487 * configuration. Hence the old configuration must be perserved in @old_state to
2488 * be able to call this function.
2489 *
2490 * This function must also be called on the new state when the atomic update
2491 * fails at any point after calling drm_atomic_helper_prepare_planes().
2492 */
2493 void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
2494 struct drm_atomic_state *old_state)
2495 {
2496 struct drm_plane *plane;
2497 struct drm_plane_state *old_plane_state, *new_plane_state;
2498 int i;
2499
2500 for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
2501 const struct drm_plane_helper_funcs *funcs;
2502 struct drm_plane_state *plane_state;
2503
2504 /*
2505 * This might be called before swapping when commit is aborted,
2506 * in which case we have to cleanup the new state.
2507 */
2508 if (old_plane_state == plane->state)
2509 plane_state = new_plane_state;
2510 else
2511 plane_state = old_plane_state;
2512
2513 funcs = plane->helper_private;
2514
2515 if (funcs->cleanup_fb)
2516 funcs->cleanup_fb(plane, plane_state);
2517 }
2518 }
2519 EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
2520
2521 /**
2522 * drm_atomic_helper_swap_state - store atomic state into current sw state
2523 * @state: atomic state
2524 * @stall: stall for preceeding commits
2525 *
2526 * This function stores the atomic state into the current state pointers in all
2527 * driver objects. It should be called after all failing steps have been done
2528 * and succeeded, but before the actual hardware state is committed.
2529 *
2530 * For cleanup and error recovery the current state for all changed objects will
2531 * be swapped into @state.
2532 *
2533 * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
2534 *
2535 * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
2536 *
2537 * 2. Do any other steps that might fail.
2538 *
2539 * 3. Put the staged state into the current state pointers with this function.
2540 *
2541 * 4. Actually commit the hardware state.
2542 *
2543 * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
2544 * contains the old state. Also do any other cleanup required with that state.
2545 *
2546 * @stall must be set when nonblocking commits for this driver directly access
2547 * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
2548 * the current atomic helpers this is almost always the case, since the helpers
2549 * don't pass the right state structures to the callbacks.
2550 *
2551 * Returns:
2552 *
2553 * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the
2554 * waiting for the previous commits has been interrupted.
2555 */
2556 int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
2557 bool stall)
2558 {
2559 int i, ret;
2560 struct drm_connector *connector;
2561 struct drm_connector_state *old_conn_state, *new_conn_state;
2562 struct drm_crtc *crtc;
2563 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2564 struct drm_plane *plane;
2565 struct drm_plane_state *old_plane_state, *new_plane_state;
2566 struct drm_crtc_commit *commit;
2567 struct drm_private_obj *obj;
2568 struct drm_private_state *old_obj_state, *new_obj_state;
2569
2570 if (stall) {
2571 /*
2572 * We have to stall for hw_done here before
2573 * drm_atomic_helper_wait_for_dependencies() because flip
2574 * depth > 1 is not yet supported by all drivers. As long as
2575 * obj->state is directly dereferenced anywhere in the drivers
2576 * atomic_commit_tail function, then it's unsafe to swap state
2577 * before drm_atomic_helper_commit_hw_done() is called.
2578 */
2579
2580 for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
2581 commit = old_crtc_state->commit;
2582
2583 if (!commit)
2584 continue;
2585
2586 ret = wait_for_completion_interruptible(&commit->hw_done);
2587 if (ret)
2588 return ret;
2589 }
2590
2591 for_each_old_connector_in_state(state, connector, old_conn_state, i) {
2592 commit = old_conn_state->commit;
2593
2594 if (!commit)
2595 continue;
2596
2597 ret = wait_for_completion_interruptible(&commit->hw_done);
2598 if (ret)
2599 return ret;
2600 }
2601
2602 for_each_old_plane_in_state(state, plane, old_plane_state, i) {
2603 commit = old_plane_state->commit;
2604
2605 if (!commit)
2606 continue;
2607
2608 ret = wait_for_completion_interruptible(&commit->hw_done);
2609 if (ret)
2610 return ret;
2611 }
2612 }
2613
2614 for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
2615 WARN_ON(connector->state != old_conn_state);
2616
2617 old_conn_state->state = state;
2618 new_conn_state->state = NULL;
2619
2620 state->connectors[i].state = old_conn_state;
2621 connector->state = new_conn_state;
2622 }
2623
2624 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2625 WARN_ON(crtc->state != old_crtc_state);
2626
2627 old_crtc_state->state = state;
2628 new_crtc_state->state = NULL;
2629
2630 state->crtcs[i].state = old_crtc_state;
2631 crtc->state = new_crtc_state;
2632
2633 if (new_crtc_state->commit) {
2634 spin_lock(&crtc->commit_lock);
2635 list_add(&new_crtc_state->commit->commit_entry,
2636 &crtc->commit_list);
2637 spin_unlock(&crtc->commit_lock);
2638
2639 new_crtc_state->commit->event = NULL;
2640 }
2641 }
2642
2643 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2644 WARN_ON(plane->state != old_plane_state);
2645
2646 old_plane_state->state = state;
2647 new_plane_state->state = NULL;
2648
2649 state->planes[i].state = old_plane_state;
2650 plane->state = new_plane_state;
2651 }
2652
2653 for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
2654 WARN_ON(obj->state != old_obj_state);
2655
2656 old_obj_state->state = state;
2657 new_obj_state->state = NULL;
2658
2659 state->private_objs[i].state = old_obj_state;
2660 obj->state = new_obj_state;
2661 }
2662
2663 return 0;
2664 }
2665 EXPORT_SYMBOL(drm_atomic_helper_swap_state);
2666
2667 /**
2668 * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
2669 * @plane: plane object to update
2670 * @crtc: owning CRTC of owning plane
2671 * @fb: framebuffer to flip onto plane
2672 * @crtc_x: x offset of primary plane on crtc
2673 * @crtc_y: y offset of primary plane on crtc
2674 * @crtc_w: width of primary plane rectangle on crtc
2675 * @crtc_h: height of primary plane rectangle on crtc
2676 * @src_x: x offset of @fb for panning
2677 * @src_y: y offset of @fb for panning
2678 * @src_w: width of source rectangle in @fb
2679 * @src_h: height of source rectangle in @fb
2680 * @ctx: lock acquire context
2681 *
2682 * Provides a default plane update handler using the atomic driver interface.
2683 *
2684 * RETURNS:
2685 * Zero on success, error code on failure
2686 */
2687 int drm_atomic_helper_update_plane(struct drm_plane *plane,
2688 struct drm_crtc *crtc,
2689 struct drm_framebuffer *fb,
2690 int crtc_x, int crtc_y,
2691 unsigned int crtc_w, unsigned int crtc_h,
2692 uint32_t src_x, uint32_t src_y,
2693 uint32_t src_w, uint32_t src_h,
2694 struct drm_modeset_acquire_ctx *ctx)
2695 {
2696 struct drm_atomic_state *state;
2697 struct drm_plane_state *plane_state;
2698 int ret = 0;
2699
2700 state = drm_atomic_state_alloc(plane->dev);
2701 if (!state)
2702 return -ENOMEM;
2703
2704 state->acquire_ctx = ctx;
2705 plane_state = drm_atomic_get_plane_state(state, plane);
2706 if (IS_ERR(plane_state)) {
2707 ret = PTR_ERR(plane_state);
2708 goto fail;
2709 }
2710
2711 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
2712 if (ret != 0)
2713 goto fail;
2714 drm_atomic_set_fb_for_plane(plane_state, fb);
2715 plane_state->crtc_x = crtc_x;
2716 plane_state->crtc_y = crtc_y;
2717 plane_state->crtc_w = crtc_w;
2718 plane_state->crtc_h = crtc_h;
2719 plane_state->src_x = src_x;
2720 plane_state->src_y = src_y;
2721 plane_state->src_w = src_w;
2722 plane_state->src_h = src_h;
2723
2724 if (plane == crtc->cursor)
2725 state->legacy_cursor_update = true;
2726
2727 ret = drm_atomic_commit(state);
2728 fail:
2729 drm_atomic_state_put(state);
2730 return ret;
2731 }
2732 EXPORT_SYMBOL(drm_atomic_helper_update_plane);
2733
2734 /**
2735 * drm_atomic_helper_disable_plane - Helper for primary plane disable using * atomic
2736 * @plane: plane to disable
2737 * @ctx: lock acquire context
2738 *
2739 * Provides a default plane disable handler using the atomic driver interface.
2740 *
2741 * RETURNS:
2742 * Zero on success, error code on failure
2743 */
2744 int drm_atomic_helper_disable_plane(struct drm_plane *plane,
2745 struct drm_modeset_acquire_ctx *ctx)
2746 {
2747 struct drm_atomic_state *state;
2748 struct drm_plane_state *plane_state;
2749 int ret = 0;
2750
2751 state = drm_atomic_state_alloc(plane->dev);
2752 if (!state)
2753 return -ENOMEM;
2754
2755 state->acquire_ctx = ctx;
2756 plane_state = drm_atomic_get_plane_state(state, plane);
2757 if (IS_ERR(plane_state)) {
2758 ret = PTR_ERR(plane_state);
2759 goto fail;
2760 }
2761
2762 if (plane_state->crtc && plane_state->crtc->cursor == plane)
2763 plane_state->state->legacy_cursor_update = true;
2764
2765 ret = __drm_atomic_helper_disable_plane(plane, plane_state);
2766 if (ret != 0)
2767 goto fail;
2768
2769 ret = drm_atomic_commit(state);
2770 fail:
2771 drm_atomic_state_put(state);
2772 return ret;
2773 }
2774 EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
2775
2776 /* just used from fb-helper and atomic-helper: */
2777 int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
2778 struct drm_plane_state *plane_state)
2779 {
2780 int ret;
2781
2782 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
2783 if (ret != 0)
2784 return ret;
2785
2786 drm_atomic_set_fb_for_plane(plane_state, NULL);
2787 plane_state->crtc_x = 0;
2788 plane_state->crtc_y = 0;
2789 plane_state->crtc_w = 0;
2790 plane_state->crtc_h = 0;
2791 plane_state->src_x = 0;
2792 plane_state->src_y = 0;
2793 plane_state->src_w = 0;
2794 plane_state->src_h = 0;
2795
2796 return 0;
2797 }
2798
2799 static int update_output_state(struct drm_atomic_state *state,
2800 struct drm_mode_set *set)
2801 {
2802 struct drm_device *dev = set->crtc->dev;
2803 struct drm_crtc *crtc;
2804 struct drm_crtc_state *new_crtc_state;
2805 struct drm_connector *connector;
2806 struct drm_connector_state *new_conn_state;
2807 int ret, i;
2808
2809 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
2810 state->acquire_ctx);
2811 if (ret)
2812 return ret;
2813
2814 /* First disable all connectors on the target crtc. */
2815 ret = drm_atomic_add_affected_connectors(state, set->crtc);
2816 if (ret)
2817 return ret;
2818
2819 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
2820 if (new_conn_state->crtc == set->crtc) {
2821 ret = drm_atomic_set_crtc_for_connector(new_conn_state,
2822 NULL);
2823 if (ret)
2824 return ret;
2825
2826 /* Make sure legacy setCrtc always re-trains */
2827 new_conn_state->link_status = DRM_LINK_STATUS_GOOD;
2828 }
2829 }
2830
2831 /* Then set all connectors from set->connectors on the target crtc */
2832 for (i = 0; i < set->num_connectors; i++) {
2833 new_conn_state = drm_atomic_get_connector_state(state,
2834 set->connectors[i]);
2835 if (IS_ERR(new_conn_state))
2836 return PTR_ERR(new_conn_state);
2837
2838 ret = drm_atomic_set_crtc_for_connector(new_conn_state,
2839 set->crtc);
2840 if (ret)
2841 return ret;
2842 }
2843
2844 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
2845 /* Don't update ->enable for the CRTC in the set_config request,
2846 * since a mismatch would indicate a bug in the upper layers.
2847 * The actual modeset code later on will catch any
2848 * inconsistencies here. */
2849 if (crtc == set->crtc)
2850 continue;
2851
2852 if (!new_crtc_state->connector_mask) {
2853 ret = drm_atomic_set_mode_prop_for_crtc(new_crtc_state,
2854 NULL);
2855 if (ret < 0)
2856 return ret;
2857
2858 new_crtc_state->active = false;
2859 }
2860 }
2861
2862 return 0;
2863 }
2864
2865 /**
2866 * drm_atomic_helper_set_config - set a new config from userspace
2867 * @set: mode set configuration
2868 * @ctx: lock acquisition context
2869 *
2870 * Provides a default crtc set_config handler using the atomic driver interface.
2871 *
2872 * NOTE: For backwards compatibility with old userspace this automatically
2873 * resets the "link-status" property to GOOD, to force any link
2874 * re-training. The SETCRTC ioctl does not define whether an update does
2875 * need a full modeset or just a plane update, hence we're allowed to do
2876 * that. See also drm_connector_set_link_status_property().
2877 *
2878 * Returns:
2879 * Returns 0 on success, negative errno numbers on failure.
2880 */
2881 int drm_atomic_helper_set_config(struct drm_mode_set *set,
2882 struct drm_modeset_acquire_ctx *ctx)
2883 {
2884 struct drm_atomic_state *state;
2885 struct drm_crtc *crtc = set->crtc;
2886 int ret = 0;
2887
2888 state = drm_atomic_state_alloc(crtc->dev);
2889 if (!state)
2890 return -ENOMEM;
2891
2892 state->acquire_ctx = ctx;
2893 ret = __drm_atomic_helper_set_config(set, state);
2894 if (ret != 0)
2895 goto fail;
2896
2897 ret = handle_conflicting_encoders(state, true);
2898 if (ret)
2899 return ret;
2900
2901 ret = drm_atomic_commit(state);
2902
2903 fail:
2904 drm_atomic_state_put(state);
2905 return ret;
2906 }
2907 EXPORT_SYMBOL(drm_atomic_helper_set_config);
2908
2909 /* just used from fb-helper and atomic-helper: */
2910 int __drm_atomic_helper_set_config(struct drm_mode_set *set,
2911 struct drm_atomic_state *state)
2912 {
2913 struct drm_crtc_state *crtc_state;
2914 struct drm_plane_state *primary_state;
2915 struct drm_crtc *crtc = set->crtc;
2916 int hdisplay, vdisplay;
2917 int ret;
2918
2919 crtc_state = drm_atomic_get_crtc_state(state, crtc);
2920 if (IS_ERR(crtc_state))
2921 return PTR_ERR(crtc_state);
2922
2923 primary_state = drm_atomic_get_plane_state(state, crtc->primary);
2924 if (IS_ERR(primary_state))
2925 return PTR_ERR(primary_state);
2926
2927 if (!set->mode) {
2928 WARN_ON(set->fb);
2929 WARN_ON(set->num_connectors);
2930
2931 ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
2932 if (ret != 0)
2933 return ret;
2934
2935 crtc_state->active = false;
2936
2937 ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
2938 if (ret != 0)
2939 return ret;
2940
2941 drm_atomic_set_fb_for_plane(primary_state, NULL);
2942
2943 goto commit;
2944 }
2945
2946 WARN_ON(!set->fb);
2947 WARN_ON(!set->num_connectors);
2948
2949 ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode);
2950 if (ret != 0)
2951 return ret;
2952
2953 crtc_state->active = true;
2954
2955 ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
2956 if (ret != 0)
2957 return ret;
2958
2959 drm_mode_get_hv_timing(set->mode, &hdisplay, &vdisplay);
2960
2961 drm_atomic_set_fb_for_plane(primary_state, set->fb);
2962 primary_state->crtc_x = 0;
2963 primary_state->crtc_y = 0;
2964 primary_state->crtc_w = hdisplay;
2965 primary_state->crtc_h = vdisplay;
2966 primary_state->src_x = set->x << 16;
2967 primary_state->src_y = set->y << 16;
2968 if (drm_rotation_90_or_270(primary_state->rotation)) {
2969 primary_state->src_w = vdisplay << 16;
2970 primary_state->src_h = hdisplay << 16;
2971 } else {
2972 primary_state->src_w = hdisplay << 16;
2973 primary_state->src_h = vdisplay << 16;
2974 }
2975
2976 commit:
2977 ret = update_output_state(state, set);
2978 if (ret)
2979 return ret;
2980
2981 return 0;
2982 }
2983
2984 static int __drm_atomic_helper_disable_all(struct drm_device *dev,
2985 struct drm_modeset_acquire_ctx *ctx,
2986 bool clean_old_fbs)
2987 {
2988 struct drm_atomic_state *state;
2989 struct drm_connector_state *conn_state;
2990 struct drm_connector *conn;
2991 struct drm_plane_state *plane_state;
2992 struct drm_plane *plane;
2993 struct drm_crtc_state *crtc_state;
2994 struct drm_crtc *crtc;
2995 int ret, i;
2996
2997 state = drm_atomic_state_alloc(dev);
2998 if (!state)
2999 return -ENOMEM;
3000
3001 state->acquire_ctx = ctx;
3002
3003 drm_for_each_crtc(crtc, dev) {
3004 crtc_state = drm_atomic_get_crtc_state(state, crtc);
3005 if (IS_ERR(crtc_state)) {
3006 ret = PTR_ERR(crtc_state);
3007 goto free;
3008 }
3009
3010 crtc_state->active = false;
3011
3012 ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
3013 if (ret < 0)
3014 goto free;
3015
3016 ret = drm_atomic_add_affected_planes(state, crtc);
3017 if (ret < 0)
3018 goto free;
3019
3020 ret = drm_atomic_add_affected_connectors(state, crtc);
3021 if (ret < 0)
3022 goto free;
3023 }
3024
3025 for_each_new_connector_in_state(state, conn, conn_state, i) {
3026 ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
3027 if (ret < 0)
3028 goto free;
3029 }
3030
3031 for_each_new_plane_in_state(state, plane, plane_state, i) {
3032 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
3033 if (ret < 0)
3034 goto free;
3035
3036 drm_atomic_set_fb_for_plane(plane_state, NULL);
3037 }
3038
3039 ret = drm_atomic_commit(state);
3040 free:
3041 drm_atomic_state_put(state);
3042 return ret;
3043 }
3044
3045 /**
3046 * drm_atomic_helper_disable_all - disable all currently active outputs
3047 * @dev: DRM device
3048 * @ctx: lock acquisition context
3049 *
3050 * Loops through all connectors, finding those that aren't turned off and then
3051 * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
3052 * that they are connected to.
3053 *
3054 * This is used for example in suspend/resume to disable all currently active
3055 * functions when suspending. If you just want to shut down everything at e.g.
3056 * driver unload, look at drm_atomic_helper_shutdown().
3057 *
3058 * Note that if callers haven't already acquired all modeset locks this might
3059 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3060 *
3061 * Returns:
3062 * 0 on success or a negative error code on failure.
3063 *
3064 * See also:
3065 * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
3066 * drm_atomic_helper_shutdown().
3067 */
3068 int drm_atomic_helper_disable_all(struct drm_device *dev,
3069 struct drm_modeset_acquire_ctx *ctx)
3070 {
3071 return __drm_atomic_helper_disable_all(dev, ctx, false);
3072 }
3073 EXPORT_SYMBOL(drm_atomic_helper_disable_all);
3074
3075 /**
3076 * drm_atomic_helper_shutdown - shutdown all CRTC
3077 * @dev: DRM device
3078 *
3079 * This shuts down all CRTC, which is useful for driver unloading. Shutdown on
3080 * suspend should instead be handled with drm_atomic_helper_suspend(), since
3081 * that also takes a snapshot of the modeset state to be restored on resume.
3082 *
3083 * This is just a convenience wrapper around drm_atomic_helper_disable_all(),
3084 * and it is the atomic version of drm_crtc_force_disable_all().
3085 */
3086 void drm_atomic_helper_shutdown(struct drm_device *dev)
3087 {
3088 struct drm_modeset_acquire_ctx ctx;
3089 int ret;
3090
3091 drm_modeset_acquire_init(&ctx, 0);
3092 while (1) {
3093 ret = drm_modeset_lock_all_ctx(dev, &ctx);
3094 if (!ret)
3095 ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
3096
3097 if (ret != -EDEADLK)
3098 break;
3099
3100 drm_modeset_backoff(&ctx);
3101 }
3102
3103 if (ret)
3104 DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
3105
3106 drm_modeset_drop_locks(&ctx);
3107 drm_modeset_acquire_fini(&ctx);
3108 }
3109 EXPORT_SYMBOL(drm_atomic_helper_shutdown);
3110
3111 /**
3112 * drm_atomic_helper_suspend - subsystem-level suspend helper
3113 * @dev: DRM device
3114 *
3115 * Duplicates the current atomic state, disables all active outputs and then
3116 * returns a pointer to the original atomic state to the caller. Drivers can
3117 * pass this pointer to the drm_atomic_helper_resume() helper upon resume to
3118 * restore the output configuration that was active at the time the system
3119 * entered suspend.
3120 *
3121 * Note that it is potentially unsafe to use this. The atomic state object
3122 * returned by this function is assumed to be persistent. Drivers must ensure
3123 * that this holds true. Before calling this function, drivers must make sure
3124 * to suspend fbdev emulation so that nothing can be using the device.
3125 *
3126 * Returns:
3127 * A pointer to a copy of the state before suspend on success or an ERR_PTR()-
3128 * encoded error code on failure. Drivers should store the returned atomic
3129 * state object and pass it to the drm_atomic_helper_resume() helper upon
3130 * resume.
3131 *
3132 * See also:
3133 * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
3134 * drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state()
3135 */
3136 struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
3137 {
3138 struct drm_modeset_acquire_ctx ctx;
3139 struct drm_atomic_state *state;
3140 int err;
3141
3142 drm_modeset_acquire_init(&ctx, 0);
3143
3144 retry:
3145 err = drm_modeset_lock_all_ctx(dev, &ctx);
3146 if (err < 0) {
3147 state = ERR_PTR(err);
3148 goto unlock;
3149 }
3150
3151 state = drm_atomic_helper_duplicate_state(dev, &ctx);
3152 if (IS_ERR(state))
3153 goto unlock;
3154
3155 err = drm_atomic_helper_disable_all(dev, &ctx);
3156 if (err < 0) {
3157 drm_atomic_state_put(state);
3158 state = ERR_PTR(err);
3159 goto unlock;
3160 }
3161
3162 unlock:
3163 if (PTR_ERR(state) == -EDEADLK) {
3164 drm_modeset_backoff(&ctx);
3165 goto retry;
3166 }
3167
3168 drm_modeset_drop_locks(&ctx);
3169 drm_modeset_acquire_fini(&ctx);
3170 return state;
3171 }
3172 EXPORT_SYMBOL(drm_atomic_helper_suspend);
3173
3174 /**
3175 * drm_atomic_helper_commit_duplicated_state - commit duplicated state
3176 * @state: duplicated atomic state to commit
3177 * @ctx: pointer to acquire_ctx to use for commit.
3178 *
3179 * The state returned by drm_atomic_helper_duplicate_state() and
3180 * drm_atomic_helper_suspend() is partially invalid, and needs to
3181 * be fixed up before commit.
3182 *
3183 * Returns:
3184 * 0 on success or a negative error code on failure.
3185 *
3186 * See also:
3187 * drm_atomic_helper_suspend()
3188 */
3189 int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
3190 struct drm_modeset_acquire_ctx *ctx)
3191 {
3192 int i;
3193 struct drm_plane *plane;
3194 struct drm_plane_state *new_plane_state;
3195 struct drm_connector *connector;
3196 struct drm_connector_state *new_conn_state;
3197 struct drm_crtc *crtc;
3198 struct drm_crtc_state *new_crtc_state;
3199
3200 state->acquire_ctx = ctx;
3201
3202 for_each_new_plane_in_state(state, plane, new_plane_state, i)
3203 state->planes[i].old_state = plane->state;
3204
3205 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
3206 state->crtcs[i].old_state = crtc->state;
3207
3208 for_each_new_connector_in_state(state, connector, new_conn_state, i)
3209 state->connectors[i].old_state = connector->state;
3210
3211 return drm_atomic_commit(state);
3212 }
3213 EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
3214
3215 /**
3216 * drm_atomic_helper_resume - subsystem-level resume helper
3217 * @dev: DRM device
3218 * @state: atomic state to resume to
3219 *
3220 * Calls drm_mode_config_reset() to synchronize hardware and software states,
3221 * grabs all modeset locks and commits the atomic state object. This can be
3222 * used in conjunction with the drm_atomic_helper_suspend() helper to
3223 * implement suspend/resume for drivers that support atomic mode-setting.
3224 *
3225 * Returns:
3226 * 0 on success or a negative error code on failure.
3227 *
3228 * See also:
3229 * drm_atomic_helper_suspend()
3230 */
3231 int drm_atomic_helper_resume(struct drm_device *dev,
3232 struct drm_atomic_state *state)
3233 {
3234 struct drm_modeset_acquire_ctx ctx;
3235 int err;
3236
3237 drm_mode_config_reset(dev);
3238
3239 drm_modeset_acquire_init(&ctx, 0);
3240 while (1) {
3241 err = drm_modeset_lock_all_ctx(dev, &ctx);
3242 if (err)
3243 goto out;
3244
3245 err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
3246 out:
3247 if (err != -EDEADLK)
3248 break;
3249
3250 drm_modeset_backoff(&ctx);
3251 }
3252
3253 drm_atomic_state_put(state);
3254 drm_modeset_drop_locks(&ctx);
3255 drm_modeset_acquire_fini(&ctx);
3256
3257 return err;
3258 }
3259 EXPORT_SYMBOL(drm_atomic_helper_resume);
3260
3261 static int page_flip_common(struct drm_atomic_state *state,
3262 struct drm_crtc *crtc,
3263 struct drm_framebuffer *fb,
3264 struct drm_pending_vblank_event *event,
3265 uint32_t flags)
3266 {
3267 struct drm_plane *plane = crtc->primary;
3268 struct drm_plane_state *plane_state;
3269 struct drm_crtc_state *crtc_state;
3270 int ret = 0;
3271
3272 crtc_state = drm_atomic_get_crtc_state(state, crtc);
3273 if (IS_ERR(crtc_state))
3274 return PTR_ERR(crtc_state);
3275
3276 crtc_state->event = event;
3277 crtc_state->pageflip_flags = flags;
3278
3279 plane_state = drm_atomic_get_plane_state(state, plane);
3280 if (IS_ERR(plane_state))
3281 return PTR_ERR(plane_state);
3282
3283 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
3284 if (ret != 0)
3285 return ret;
3286 drm_atomic_set_fb_for_plane(plane_state, fb);
3287
3288 /* Make sure we don't accidentally do a full modeset. */
3289 state->allow_modeset = false;
3290 if (!crtc_state->active) {
3291 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled, rejecting legacy flip\n",
3292 crtc->base.id, crtc->name);
3293 return -EINVAL;
3294 }
3295
3296 return ret;
3297 }
3298
3299 /**
3300 * drm_atomic_helper_page_flip - execute a legacy page flip
3301 * @crtc: DRM crtc
3302 * @fb: DRM framebuffer
3303 * @event: optional DRM event to signal upon completion
3304 * @flags: flip flags for non-vblank sync'ed updates
3305 * @ctx: lock acquisition context
3306 *
3307 * Provides a default &drm_crtc_funcs.page_flip implementation
3308 * using the atomic driver interface.
3309 *
3310 * Returns:
3311 * Returns 0 on success, negative errno numbers on failure.
3312 *
3313 * See also:
3314 * drm_atomic_helper_page_flip_target()
3315 */
3316 int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
3317 struct drm_framebuffer *fb,
3318 struct drm_pending_vblank_event *event,
3319 uint32_t flags,
3320 struct drm_modeset_acquire_ctx *ctx)
3321 {
3322 struct drm_plane *plane = crtc->primary;
3323 struct drm_atomic_state *state;
3324 int ret = 0;
3325
3326 state = drm_atomic_state_alloc(plane->dev);
3327 if (!state)
3328 return -ENOMEM;
3329
3330 state->acquire_ctx = ctx;
3331
3332 ret = page_flip_common(state, crtc, fb, event, flags);
3333 if (ret != 0)
3334 goto fail;
3335
3336 ret = drm_atomic_nonblocking_commit(state);
3337 fail:
3338 drm_atomic_state_put(state);
3339 return ret;
3340 }
3341 EXPORT_SYMBOL(drm_atomic_helper_page_flip);
3342
3343 /**
3344 * drm_atomic_helper_page_flip_target - do page flip on target vblank period.
3345 * @crtc: DRM crtc
3346 * @fb: DRM framebuffer
3347 * @event: optional DRM event to signal upon completion
3348 * @flags: flip flags for non-vblank sync'ed updates
3349 * @target: specifying the target vblank period when the flip to take effect
3350 * @ctx: lock acquisition context
3351 *
3352 * Provides a default &drm_crtc_funcs.page_flip_target implementation.
3353 * Similar to drm_atomic_helper_page_flip() with extra parameter to specify
3354 * target vblank period to flip.
3355 *
3356 * Returns:
3357 * Returns 0 on success, negative errno numbers on failure.
3358 */
3359 int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
3360 struct drm_framebuffer *fb,
3361 struct drm_pending_vblank_event *event,
3362 uint32_t flags,
3363 uint32_t target,
3364 struct drm_modeset_acquire_ctx *ctx)
3365 {
3366 struct drm_plane *plane = crtc->primary;
3367 struct drm_atomic_state *state;
3368 struct drm_crtc_state *crtc_state;
3369 int ret = 0;
3370
3371 state = drm_atomic_state_alloc(plane->dev);
3372 if (!state)
3373 return -ENOMEM;
3374
3375 state->acquire_ctx = ctx;
3376
3377 ret = page_flip_common(state, crtc, fb, event, flags);
3378 if (ret != 0)
3379 goto fail;
3380
3381 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
3382 if (WARN_ON(!crtc_state)) {
3383 ret = -EINVAL;
3384 goto fail;
3385 }
3386 crtc_state->target_vblank = target;
3387
3388 ret = drm_atomic_nonblocking_commit(state);
3389 fail:
3390 drm_atomic_state_put(state);
3391 return ret;
3392 }
3393 EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
3394
3395 /**
3396 * drm_atomic_helper_best_encoder - Helper for
3397 * &drm_connector_helper_funcs.best_encoder callback
3398 * @connector: Connector control structure
3399 *
3400 * This is a &drm_connector_helper_funcs.best_encoder callback helper for
3401 * connectors that support exactly 1 encoder, statically determined at driver
3402 * init time.
3403 */
3404 struct drm_encoder *
3405 drm_atomic_helper_best_encoder(struct drm_connector *connector)
3406 {
3407 WARN_ON(connector->encoder_ids[1]);
3408 return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
3409 }
3410 EXPORT_SYMBOL(drm_atomic_helper_best_encoder);
3411
3412 /**
3413 * DOC: atomic state reset and initialization
3414 *
3415 * Both the drm core and the atomic helpers assume that there is always the full
3416 * and correct atomic software state for all connectors, CRTCs and planes
3417 * available. Which is a bit a problem on driver load and also after system
3418 * suspend. One way to solve this is to have a hardware state read-out
3419 * infrastructure which reconstructs the full software state (e.g. the i915
3420 * driver).
3421 *
3422 * The simpler solution is to just reset the software state to everything off,
3423 * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
3424 * the atomic helpers provide default reset implementations for all hooks.
3425 *
3426 * On the upside the precise state tracking of atomic simplifies system suspend
3427 * and resume a lot. For drivers using drm_mode_config_reset() a complete recipe
3428 * is implemented in drm_atomic_helper_suspend() and drm_atomic_helper_resume().
3429 * For other drivers the building blocks are split out, see the documentation
3430 * for these functions.
3431 */
3432
3433 /**
3434 * drm_atomic_helper_crtc_reset - default &drm_crtc_funcs.reset hook for CRTCs
3435 * @crtc: drm CRTC
3436 *
3437 * Resets the atomic state for @crtc by freeing the state pointer (which might
3438 * be NULL, e.g. at driver load time) and allocating a new empty state object.
3439 */
3440 void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
3441 {
3442 if (crtc->state)
3443 __drm_atomic_helper_crtc_destroy_state(crtc->state);
3444
3445 kfree(crtc->state);
3446 crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
3447
3448 if (crtc->state)
3449 crtc->state->crtc = crtc;
3450 }
3451 EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
3452
3453 /**
3454 * __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state
3455 * @crtc: CRTC object
3456 * @state: atomic CRTC state
3457 *
3458 * Copies atomic state from a CRTC's current state and resets inferred values.
3459 * This is useful for drivers that subclass the CRTC state.
3460 */
3461 void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
3462 struct drm_crtc_state *state)
3463 {
3464 memcpy(state, crtc->state, sizeof(*state));
3465
3466 if (state->mode_blob)
3467 drm_property_blob_get(state->mode_blob);
3468 if (state->degamma_lut)
3469 drm_property_blob_get(state->degamma_lut);
3470 if (state->ctm)
3471 drm_property_blob_get(state->ctm);
3472 if (state->gamma_lut)
3473 drm_property_blob_get(state->gamma_lut);
3474 state->mode_changed = false;
3475 state->active_changed = false;
3476 state->planes_changed = false;
3477 state->connectors_changed = false;
3478 state->color_mgmt_changed = false;
3479 state->zpos_changed = false;
3480 state->commit = NULL;
3481 state->event = NULL;
3482 state->pageflip_flags = 0;
3483 }
3484 EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
3485
3486 /**
3487 * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
3488 * @crtc: drm CRTC
3489 *
3490 * Default CRTC state duplicate hook for drivers which don't have their own
3491 * subclassed CRTC state structure.
3492 */
3493 struct drm_crtc_state *
3494 drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
3495 {
3496 struct drm_crtc_state *state;
3497
3498 if (WARN_ON(!crtc->state))
3499 return NULL;
3500
3501 state = kmalloc(sizeof(*state), GFP_KERNEL);
3502 if (state)
3503 __drm_atomic_helper_crtc_duplicate_state(crtc, state);
3504
3505 return state;
3506 }
3507 EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
3508
3509 /**
3510 * __drm_atomic_helper_crtc_destroy_state - release CRTC state
3511 * @state: CRTC state object to release
3512 *
3513 * Releases all resources stored in the CRTC state without actually freeing
3514 * the memory of the CRTC state. This is useful for drivers that subclass the
3515 * CRTC state.
3516 */
3517 void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
3518 {
3519 if (state->commit) {
3520 /*
3521 * In the event that a non-blocking commit returns
3522 * -ERESTARTSYS before the commit_tail work is queued, we will
3523 * have an extra reference to the commit object. Release it, if
3524 * the event has not been consumed by the worker.
3525 *
3526 * state->event may be freed, so we can't directly look at
3527 * state->event->base.completion.
3528 */
3529 if (state->event && state->commit->abort_completion)
3530 drm_crtc_commit_put(state->commit);
3531
3532 kfree(state->commit->event);
3533 state->commit->event = NULL;
3534
3535 drm_crtc_commit_put(state->commit);
3536 }
3537
3538 drm_property_blob_put(state->mode_blob);
3539 drm_property_blob_put(state->degamma_lut);
3540 drm_property_blob_put(state->ctm);
3541 drm_property_blob_put(state->gamma_lut);
3542 }
3543 EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
3544
3545 /**
3546 * drm_atomic_helper_crtc_destroy_state - default state destroy hook
3547 * @crtc: drm CRTC
3548 * @state: CRTC state object to release
3549 *
3550 * Default CRTC state destroy hook for drivers which don't have their own
3551 * subclassed CRTC state structure.
3552 */
3553 void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
3554 struct drm_crtc_state *state)
3555 {
3556 __drm_atomic_helper_crtc_destroy_state(state);
3557 kfree(state);
3558 }
3559 EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
3560
3561 /**
3562 * drm_atomic_helper_plane_reset - default &drm_plane_funcs.reset hook for planes
3563 * @plane: drm plane
3564 *
3565 * Resets the atomic state for @plane by freeing the state pointer (which might
3566 * be NULL, e.g. at driver load time) and allocating a new empty state object.
3567 */
3568 void drm_atomic_helper_plane_reset(struct drm_plane *plane)
3569 {
3570 if (plane->state)
3571 __drm_atomic_helper_plane_destroy_state(plane->state);
3572
3573 kfree(plane->state);
3574 plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
3575
3576 if (plane->state) {
3577 plane->state->plane = plane;
3578 plane->state->rotation = DRM_MODE_ROTATE_0;
3579
3580 /* Reset the alpha value to fully opaque if it matters */
3581 if (plane->alpha_property)
3582 plane->state->alpha = plane->alpha_property->values[1];
3583 }
3584 }
3585 EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
3586
3587 /**
3588 * __drm_atomic_helper_plane_duplicate_state - copy atomic plane state
3589 * @plane: plane object
3590 * @state: atomic plane state
3591 *
3592 * Copies atomic state from a plane's current state. This is useful for
3593 * drivers that subclass the plane state.
3594 */
3595 void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
3596 struct drm_plane_state *state)
3597 {
3598 memcpy(state, plane->state, sizeof(*state));
3599
3600 if (state->fb)
3601 drm_framebuffer_get(state->fb);
3602
3603 state->fence = NULL;
3604 state->commit = NULL;
3605 }
3606 EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
3607
3608 /**
3609 * drm_atomic_helper_plane_duplicate_state - default state duplicate hook
3610 * @plane: drm plane
3611 *
3612 * Default plane state duplicate hook for drivers which don't have their own
3613 * subclassed plane state structure.
3614 */
3615 struct drm_plane_state *
3616 drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
3617 {
3618 struct drm_plane_state *state;
3619
3620 if (WARN_ON(!plane->state))
3621 return NULL;
3622
3623 state = kmalloc(sizeof(*state), GFP_KERNEL);
3624 if (state)
3625 __drm_atomic_helper_plane_duplicate_state(plane, state);
3626
3627 return state;
3628 }
3629 EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
3630
3631 /**
3632 * __drm_atomic_helper_plane_destroy_state - release plane state
3633 * @state: plane state object to release
3634 *
3635 * Releases all resources stored in the plane state without actually freeing
3636 * the memory of the plane state. This is useful for drivers that subclass the
3637 * plane state.
3638 */
3639 void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
3640 {
3641 if (state->fb)
3642 drm_framebuffer_put(state->fb);
3643
3644 if (state->fence)
3645 dma_fence_put(state->fence);
3646
3647 if (state->commit)
3648 drm_crtc_commit_put(state->commit);
3649 }
3650 EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
3651
3652 /**
3653 * drm_atomic_helper_plane_destroy_state - default state destroy hook
3654 * @plane: drm plane
3655 * @state: plane state object to release
3656 *
3657 * Default plane state destroy hook for drivers which don't have their own
3658 * subclassed plane state structure.
3659 */
3660 void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
3661 struct drm_plane_state *state)
3662 {
3663 __drm_atomic_helper_plane_destroy_state(state);
3664 kfree(state);
3665 }
3666 EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
3667
3668 /**
3669 * __drm_atomic_helper_connector_reset - reset state on connector
3670 * @connector: drm connector
3671 * @conn_state: connector state to assign
3672 *
3673 * Initializes the newly allocated @conn_state and assigns it to
3674 * the &drm_conector->state pointer of @connector, usually required when
3675 * initializing the drivers or when called from the &drm_connector_funcs.reset
3676 * hook.
3677 *
3678 * This is useful for drivers that subclass the connector state.
3679 */
3680 void
3681 __drm_atomic_helper_connector_reset(struct drm_connector *connector,
3682 struct drm_connector_state *conn_state)
3683 {
3684 if (conn_state)
3685 conn_state->connector = connector;
3686
3687 connector->state = conn_state;
3688 }
3689 EXPORT_SYMBOL(__drm_atomic_helper_connector_reset);
3690
3691 /**
3692 * drm_atomic_helper_connector_reset - default &drm_connector_funcs.reset hook for connectors
3693 * @connector: drm connector
3694 *
3695 * Resets the atomic state for @connector by freeing the state pointer (which
3696 * might be NULL, e.g. at driver load time) and allocating a new empty state
3697 * object.
3698 */
3699 void drm_atomic_helper_connector_reset(struct drm_connector *connector)
3700 {
3701 struct drm_connector_state *conn_state =
3702 kzalloc(sizeof(*conn_state), GFP_KERNEL);
3703
3704 if (connector->state)
3705 __drm_atomic_helper_connector_destroy_state(connector->state);
3706
3707 kfree(connector->state);
3708 __drm_atomic_helper_connector_reset(connector, conn_state);
3709 }
3710 EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
3711
3712 /**
3713 * __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
3714 * @connector: connector object
3715 * @state: atomic connector state
3716 *
3717 * Copies atomic state from a connector's current state. This is useful for
3718 * drivers that subclass the connector state.
3719 */
3720 void
3721 __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
3722 struct drm_connector_state *state)
3723 {
3724 memcpy(state, connector->state, sizeof(*state));
3725 if (state->crtc)
3726 drm_connector_get(connector);
3727 state->commit = NULL;
3728
3729 /* Don't copy over a writeback job, they are used only once */
3730 state->writeback_job = NULL;
3731 }
3732 EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
3733
3734 /**
3735 * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
3736 * @connector: drm connector
3737 *
3738 * Default connector state duplicate hook for drivers which don't have their own
3739 * subclassed connector state structure.
3740 */
3741 struct drm_connector_state *
3742 drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
3743 {
3744 struct drm_connector_state *state;
3745
3746 if (WARN_ON(!connector->state))
3747 return NULL;
3748
3749 state = kmalloc(sizeof(*state), GFP_KERNEL);
3750 if (state)
3751 __drm_atomic_helper_connector_duplicate_state(connector, state);
3752
3753 return state;
3754 }
3755 EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
3756
3757 /**
3758 * drm_atomic_helper_duplicate_state - duplicate an atomic state object
3759 * @dev: DRM device
3760 * @ctx: lock acquisition context
3761 *
3762 * Makes a copy of the current atomic state by looping over all objects and
3763 * duplicating their respective states. This is used for example by suspend/
3764 * resume support code to save the state prior to suspend such that it can
3765 * be restored upon resume.
3766 *
3767 * Note that this treats atomic state as persistent between save and restore.
3768 * Drivers must make sure that this is possible and won't result in confusion
3769 * or erroneous behaviour.
3770 *
3771 * Note that if callers haven't already acquired all modeset locks this might
3772 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3773 *
3774 * Returns:
3775 * A pointer to the copy of the atomic state object on success or an
3776 * ERR_PTR()-encoded error code on failure.
3777 *
3778 * See also:
3779 * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
3780 */
3781 struct drm_atomic_state *
3782 drm_atomic_helper_duplicate_state(struct drm_device *dev,
3783 struct drm_modeset_acquire_ctx *ctx)
3784 {
3785 struct drm_atomic_state *state;
3786 struct drm_connector *conn;
3787 struct drm_connector_list_iter conn_iter;
3788 struct drm_plane *plane;
3789 struct drm_crtc *crtc;
3790 int err = 0;
3791
3792 state = drm_atomic_state_alloc(dev);
3793 if (!state)
3794 return ERR_PTR(-ENOMEM);
3795
3796 state->acquire_ctx = ctx;
3797
3798 drm_for_each_crtc(crtc, dev) {
3799 struct drm_crtc_state *crtc_state;
3800
3801 crtc_state = drm_atomic_get_crtc_state(state, crtc);
3802 if (IS_ERR(crtc_state)) {
3803 err = PTR_ERR(crtc_state);
3804 goto free;
3805 }
3806 }
3807
3808 drm_for_each_plane(plane, dev) {
3809 struct drm_plane_state *plane_state;
3810
3811 plane_state = drm_atomic_get_plane_state(state, plane);
3812 if (IS_ERR(plane_state)) {
3813 err = PTR_ERR(plane_state);
3814 goto free;
3815 }
3816 }
3817
3818 drm_connector_list_iter_begin(dev, &conn_iter);
3819 drm_for_each_connector_iter(conn, &conn_iter) {
3820 struct drm_connector_state *conn_state;
3821
3822 conn_state = drm_atomic_get_connector_state(state, conn);
3823 if (IS_ERR(conn_state)) {
3824 err = PTR_ERR(conn_state);
3825 drm_connector_list_iter_end(&conn_iter);
3826 goto free;
3827 }
3828 }
3829 drm_connector_list_iter_end(&conn_iter);
3830
3831 /* clear the acquire context so that it isn't accidentally reused */
3832 state->acquire_ctx = NULL;
3833
3834 free:
3835 if (err < 0) {
3836 drm_atomic_state_put(state);
3837 state = ERR_PTR(err);
3838 }
3839
3840 return state;
3841 }
3842 EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
3843
3844 /**
3845 * __drm_atomic_helper_connector_destroy_state - release connector state
3846 * @state: connector state object to release
3847 *
3848 * Releases all resources stored in the connector state without actually
3849 * freeing the memory of the connector state. This is useful for drivers that
3850 * subclass the connector state.
3851 */
3852 void
3853 __drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
3854 {
3855 if (state->crtc)
3856 drm_connector_put(state->connector);
3857
3858 if (state->commit)
3859 drm_crtc_commit_put(state->commit);
3860 }
3861 EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
3862
3863 /**
3864 * drm_atomic_helper_connector_destroy_state - default state destroy hook
3865 * @connector: drm connector
3866 * @state: connector state object to release
3867 *
3868 * Default connector state destroy hook for drivers which don't have their own
3869 * subclassed connector state structure.
3870 */
3871 void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
3872 struct drm_connector_state *state)
3873 {
3874 __drm_atomic_helper_connector_destroy_state(state);
3875 kfree(state);
3876 }
3877 EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
3878
3879 /**
3880 * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
3881 * @crtc: CRTC object
3882 * @red: red correction table
3883 * @green: green correction table
3884 * @blue: green correction table
3885 * @size: size of the tables
3886 * @ctx: lock acquire context
3887 *
3888 * Implements support for legacy gamma correction table for drivers
3889 * that support color management through the DEGAMMA_LUT/GAMMA_LUT
3890 * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for
3891 * how the atomic color management and gamma tables work.
3892 */
3893 int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
3894 u16 *red, u16 *green, u16 *blue,
3895 uint32_t size,
3896 struct drm_modeset_acquire_ctx *ctx)
3897 {
3898 struct drm_device *dev = crtc->dev;
3899 struct drm_atomic_state *state;
3900 struct drm_crtc_state *crtc_state;
3901 struct drm_property_blob *blob = NULL;
3902 struct drm_color_lut *blob_data;
3903 int i, ret = 0;
3904 bool replaced;
3905
3906 state = drm_atomic_state_alloc(crtc->dev);
3907 if (!state)
3908 return -ENOMEM;
3909
3910 blob = drm_property_create_blob(dev,
3911 sizeof(struct drm_color_lut) * size,
3912 NULL);
3913 if (IS_ERR(blob)) {
3914 ret = PTR_ERR(blob);
3915 blob = NULL;
3916 goto fail;
3917 }
3918
3919 /* Prepare GAMMA_LUT with the legacy values. */
3920 blob_data = blob->data;
3921 for (i = 0; i < size; i++) {
3922 blob_data[i].red = red[i];
3923 blob_data[i].green = green[i];
3924 blob_data[i].blue = blue[i];
3925 }
3926
3927 state->acquire_ctx = ctx;
3928 crtc_state = drm_atomic_get_crtc_state(state, crtc);
3929 if (IS_ERR(crtc_state)) {
3930 ret = PTR_ERR(crtc_state);
3931 goto fail;
3932 }
3933
3934 /* Reset DEGAMMA_LUT and CTM properties. */
3935 replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
3936 replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
3937 replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
3938 crtc_state->color_mgmt_changed |= replaced;
3939
3940 ret = drm_atomic_commit(state);
3941
3942 fail:
3943 drm_atomic_state_put(state);
3944 drm_property_blob_put(blob);
3945 return ret;
3946 }
3947 EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
3948
3949 /**
3950 * __drm_atomic_helper_private_duplicate_state - copy atomic private state
3951 * @obj: CRTC object
3952 * @state: new private object state
3953 *
3954 * Copies atomic state from a private objects's current state and resets inferred values.
3955 * This is useful for drivers that subclass the private state.
3956 */
3957 void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
3958 struct drm_private_state *state)
3959 {
3960 memcpy(state, obj->state, sizeof(*state));
3961 }
3962 EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);