1 /*
2 * Copyright (C) 2014 Red Hat
3 * Copyright (C) 2014 Intel Corp.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robdclark@gmail.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 */
27
28
29 #include <drm/drmP.h>
30 #include <drm/drm_atomic.h>
31 #include <drm/drm_mode.h>
32 #include <drm/drm_print.h>
33 #include <linux/sync_file.h>
34
35 #include "drm_crtc_internal.h"
36
37 void __drm_crtc_commit_free(struct kref *kref)
38 {
39 struct drm_crtc_commit *commit =
40 container_of(kref, struct drm_crtc_commit, ref);
41
42 kfree(commit);
43 }
44 EXPORT_SYMBOL(__drm_crtc_commit_free);
45
46 /**
47 * drm_atomic_state_default_release -
48 * release memory initialized by drm_atomic_state_init
49 * @state: atomic state
50 *
51 * Free all the memory allocated by drm_atomic_state_init.
52 * This is useful for drivers that subclass the atomic state.
53 */
54 void drm_atomic_state_default_release(struct drm_atomic_state *state)
55 {
56 kfree(state->connectors);
57 kfree(state->crtcs);
58 kfree(state->planes);
59 kfree(state->private_objs);
60 }
61 EXPORT_SYMBOL(drm_atomic_state_default_release);
62
63 /**
64 * drm_atomic_state_init - init new atomic state
65 * @dev: DRM device
66 * @state: atomic state
67 *
68 * Default implementation for filling in a new atomic state.
69 * This is useful for drivers that subclass the atomic state.
70 */
71 int
72 drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
73 {
74 kref_init(&state->ref);
75
76 /* TODO legacy paths should maybe do a better job about
77 * setting this appropriately?
78 */
79 state->allow_modeset = true;
80
81 state->crtcs = kcalloc(dev->mode_config.num_crtc,
82 sizeof(*state->crtcs), GFP_KERNEL);
83 if (!state->crtcs)
84 goto fail;
85 state->planes = kcalloc(dev->mode_config.num_total_plane,
86 sizeof(*state->planes), GFP_KERNEL);
87 if (!state->planes)
88 goto fail;
89
90 state->dev = dev;
91
92 DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);
93
94 return 0;
95 fail:
96 drm_atomic_state_default_release(state);
97 return -ENOMEM;
98 }
99 EXPORT_SYMBOL(drm_atomic_state_init);
100
101 /**
102 * drm_atomic_state_alloc - allocate atomic state
103 * @dev: DRM device
104 *
105 * This allocates an empty atomic state to track updates.
106 */
107 struct drm_atomic_state *
108 drm_atomic_state_alloc(struct drm_device *dev)
109 {
110 struct drm_mode_config *config = &dev->mode_config;
111
112 if (!config->funcs->atomic_state_alloc) {
113 struct drm_atomic_state *state;
114
115 state = kzalloc(sizeof(*state), GFP_KERNEL);
116 if (!state)
117 return NULL;
118 if (drm_atomic_state_init(dev, state) < 0) {
119 kfree(state);
120 return NULL;
121 }
122 return state;
123 }
124
125 return config->funcs->atomic_state_alloc(dev);
126 }
127 EXPORT_SYMBOL(drm_atomic_state_alloc);
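
/*
 * Illustrative sketch: a driver that subclasses the atomic state typically
 * wraps the default helpers like this and points
 * &drm_mode_config_funcs.atomic_state_alloc at its own allocator. The
 * foo_atomic_state type and function names below are hypothetical.
 *
 *	struct foo_atomic_state {
 *		struct drm_atomic_state base;
 *		int foo_private_data;
 *	};
 *
 *	static struct drm_atomic_state *
 *	foo_atomic_state_alloc(struct drm_device *dev)
 *	{
 *		struct foo_atomic_state *state;
 *
 *		state = kzalloc(sizeof(*state), GFP_KERNEL);
 *		if (!state)
 *			return NULL;
 *		if (drm_atomic_state_init(dev, &state->base) < 0) {
 *			kfree(state);
 *			return NULL;
 *		}
 *		return &state->base;
 *	}
 *
 * Such drivers usually also provide matching atomic_state_clear and
 * atomic_state_free hooks built on drm_atomic_state_default_clear() and
 * drm_atomic_state_default_release().
 */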
128
129 /**
130 * drm_atomic_state_default_clear - clear base atomic state
131 * @state: atomic state
132 *
133 * Default implementation for clearing atomic state.
134 * This is useful for drivers that subclass the atomic state.
135 */
136 void drm_atomic_state_default_clear(struct drm_atomic_state *state)
137 {
138 struct drm_device *dev = state->dev;
139 struct drm_mode_config *config = &dev->mode_config;
140 int i;
141
142 DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);
143
144 for (i = 0; i < state->num_connector; i++) {
145 struct drm_connector *connector = state->connectors[i].ptr;
146
147 if (!connector)
148 continue;
149
150 connector->funcs->atomic_destroy_state(connector,
151 state->connectors[i].state);
152 state->connectors[i].ptr = NULL;
153 state->connectors[i].state = NULL;
154 drm_connector_put(connector);
155 }
156
157 for (i = 0; i < config->num_crtc; i++) {
158 struct drm_crtc *crtc = state->crtcs[i].ptr;
159
160 if (!crtc)
161 continue;
162
163 crtc->funcs->atomic_destroy_state(crtc,
164 state->crtcs[i].state);
165
166 if (state->crtcs[i].commit) {
167 kfree(state->crtcs[i].commit->event);
168 state->crtcs[i].commit->event = NULL;
169 drm_crtc_commit_put(state->crtcs[i].commit);
170 }
171
172 state->crtcs[i].commit = NULL;
173 state->crtcs[i].ptr = NULL;
174 state->crtcs[i].state = NULL;
175 }
176
177 for (i = 0; i < config->num_total_plane; i++) {
178 struct drm_plane *plane = state->planes[i].ptr;
179
180 if (!plane)
181 continue;
182
183 plane->funcs->atomic_destroy_state(plane,
184 state->planes[i].state);
185 state->planes[i].ptr = NULL;
186 state->planes[i].state = NULL;
187 }
188
189 for (i = 0; i < state->num_private_objs; i++) {
190 struct drm_private_obj *obj = state->private_objs[i].ptr;
191
192 if (!obj)
193 continue;
194
195 obj->funcs->atomic_destroy_state(obj,
196 state->private_objs[i].state);
197 state->private_objs[i].ptr = NULL;
198 state->private_objs[i].state = NULL;
199 }
200 state->num_private_objs = 0;
201
202 }
203 EXPORT_SYMBOL(drm_atomic_state_default_clear);
204
205 /**
206 * drm_atomic_state_clear - clear state object
207 * @state: atomic state
208 *
209  * When the w/w mutex algorithm detects a deadlock we need to back off and drop
210  * all locks, so someone else could sneak in and change the current modeset
211  * configuration. That means all the state assembled in @state is no longer an
212  * atomic update to the current state, but to some arbitrary earlier state,
213  * which could break assumptions the driver's
214  * &drm_mode_config_funcs.atomic_check likely relies on.
215 *
216 * Hence we must clear all cached state and completely start over, using this
217 * function.
218 */
219 void drm_atomic_state_clear(struct drm_atomic_state *state)
220 {
221 struct drm_device *dev = state->dev;
222 struct drm_mode_config *config = &dev->mode_config;
223
224 if (config->funcs->atomic_state_clear)
225 config->funcs->atomic_state_clear(state);
226 else
227 drm_atomic_state_default_clear(state);
228 }
229 EXPORT_SYMBOL(drm_atomic_state_clear);
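
/*
 * Illustrative sketch: the usual w/w backoff dance around
 * drm_atomic_state_clear(). Allocation-failure handling is trimmed for
 * brevity; dev and crtc are assumed to exist in the caller.
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	struct drm_atomic_state *state;
 *	struct drm_crtc_state *crtc_state;
 *	int ret = 0;
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 *	state = drm_atomic_state_alloc(dev);
 *	state->acquire_ctx = &ctx;
 * retry:
 *	crtc_state = drm_atomic_get_crtc_state(state, crtc);
 *	if (IS_ERR(crtc_state)) {
 *		ret = PTR_ERR(crtc_state);
 *		if (ret == -EDEADLK) {
 *			drm_atomic_state_clear(state);
 *			drm_modeset_backoff(&ctx);
 *			goto retry;
 *		}
 *		goto out;
 *	}
 *	ret = drm_atomic_commit(state);
 *	if (ret == -EDEADLK) {
 *		drm_atomic_state_clear(state);
 *		drm_modeset_backoff(&ctx);
 *		goto retry;
 *	}
 * out:
 *	drm_atomic_state_put(state);
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 *	return ret;
 */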
230
231 /**
232 * __drm_atomic_state_free - free all memory for an atomic state
233 * @ref: This atomic state to deallocate
234 *
235 * This frees all memory associated with an atomic state, including all the
236 * per-object state for planes, crtcs and connectors.
237 */
238 void __drm_atomic_state_free(struct kref *ref)
239 {
240 struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
241 struct drm_mode_config *config = &state->dev->mode_config;
242
243 drm_atomic_state_clear(state);
244
245 DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);
246
247 if (config->funcs->atomic_state_free) {
248 config->funcs->atomic_state_free(state);
249 } else {
250 drm_atomic_state_default_release(state);
251 kfree(state);
252 }
253 }
254 EXPORT_SYMBOL(__drm_atomic_state_free);
255
256 /**
257 * drm_atomic_get_crtc_state - get crtc state
258 * @state: global atomic state object
259 * @crtc: crtc to get state object for
260 *
261 * This function returns the crtc state for the given crtc, allocating it if
262 * needed. It will also grab the relevant crtc lock to make sure that the state
263 * is consistent.
264 *
265 * Returns:
266 *
267 * Either the allocated state or the error code encoded into the pointer. When
268 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
269 * entire atomic sequence must be restarted. All other errors are fatal.
270 */
271 struct drm_crtc_state *
272 drm_atomic_get_crtc_state(struct drm_atomic_state *state,
273 struct drm_crtc *crtc)
274 {
275 int ret, index = drm_crtc_index(crtc);
276 struct drm_crtc_state *crtc_state;
277
278 WARN_ON(!state->acquire_ctx);
279
280 crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
281 if (crtc_state)
282 return crtc_state;
283
284 ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
285 if (ret)
286 return ERR_PTR(ret);
287
288 crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
289 if (!crtc_state)
290 return ERR_PTR(-ENOMEM);
291
292 state->crtcs[index].state = crtc_state;
293 state->crtcs[index].old_state = crtc->state;
294 state->crtcs[index].new_state = crtc_state;
295 state->crtcs[index].ptr = crtc;
296 crtc_state->state = state;
297
298 DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
299 crtc->base.id, crtc->name, crtc_state, state);
300
301 return crtc_state;
302 }
303 EXPORT_SYMBOL(drm_atomic_get_crtc_state);
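
/*
 * Illustrative sketch: acquiring a CRTC state inside an atomic update and
 * propagating -EDEADLK so the caller can back off and retry.
 *
 *	crtc_state = drm_atomic_get_crtc_state(state, crtc);
 *	if (IS_ERR(crtc_state))
 *		return PTR_ERR(crtc_state);
 *	crtc_state->active = true;
 */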
304
305 static void set_out_fence_for_crtc(struct drm_atomic_state *state,
306 struct drm_crtc *crtc, s32 __user *fence_ptr)
307 {
308 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
309 }
310
311 static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
312 struct drm_crtc *crtc)
313 {
314 s32 __user *fence_ptr;
315
316 fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
317 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
318
319 return fence_ptr;
320 }
321
322 /**
323 * drm_atomic_set_mode_for_crtc - set mode for CRTC
324 * @state: the CRTC whose incoming state to update
325 * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
326 *
327 * Set a mode (originating from the kernel) on the desired CRTC state and update
328 * the enable property.
329 *
330 * RETURNS:
331 * Zero on success, error code on failure. Cannot return -EDEADLK.
332 */
333 int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
334 const struct drm_display_mode *mode)
335 {
336 struct drm_mode_modeinfo umode;
337
338 /* Early return for no change. */
339 if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
340 return 0;
341
342 drm_property_blob_put(state->mode_blob);
343 state->mode_blob = NULL;
344
345 if (mode) {
346 drm_mode_convert_to_umode(&umode, mode);
347 state->mode_blob =
348 drm_property_create_blob(state->crtc->dev,
349 sizeof(umode),
350 &umode);
351 if (IS_ERR(state->mode_blob))
352 return PTR_ERR(state->mode_blob);
353
354 drm_mode_copy(&state->mode, mode);
355 state->enable = true;
356 DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
357 mode->name, state);
358 } else {
359 memset(&state->mode, 0, sizeof(state->mode));
360 state->enable = false;
361 DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
362 state);
363 }
364
365 return 0;
366 }
367 EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
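
/*
 * Illustrative sketch: programming a kernel-internal mode (for example one a
 * driver picked in its restore path) into a CRTC state; &preferred_mode is a
 * hypothetical struct drm_display_mode owned by the caller.
 *
 *	ret = drm_atomic_set_mode_for_crtc(crtc_state, &preferred_mode);
 *	if (ret)
 *		return ret;
 *	crtc_state->active = true;
 */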
368
369 /**
370 * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
371 * @state: the CRTC whose incoming state to update
372 * @blob: pointer to blob property to use for mode
373 *
374 * Set a mode (originating from a blob property) on the desired CRTC state.
375 * This function will take a reference on the blob property for the CRTC state,
376 * and release the reference held on the state's existing mode property, if any
377 * was set.
378 *
379 * RETURNS:
380 * Zero on success, error code on failure. Cannot return -EDEADLK.
381 */
382 int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
383 struct drm_property_blob *blob)
384 {
385 if (blob == state->mode_blob)
386 return 0;
387
388 drm_property_blob_put(state->mode_blob);
389 state->mode_blob = NULL;
390
391 memset(&state->mode, 0, sizeof(state->mode));
392
393 if (blob) {
394 if (blob->length != sizeof(struct drm_mode_modeinfo) ||
395 drm_mode_convert_umode(&state->mode,
396 (const struct drm_mode_modeinfo *)
397 blob->data))
398 return -EINVAL;
399
400 state->mode_blob = drm_property_blob_get(blob);
401 state->enable = true;
402 DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
403 state->mode.name, state);
404 } else {
405 state->enable = false;
406 DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
407 state);
408 }
409
410 return 0;
411 }
412 EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
413
414 static int
415 drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
416 struct drm_property_blob **blob,
417 uint64_t blob_id,
418 ssize_t expected_size,
419 bool *replaced)
420 {
421 struct drm_property_blob *new_blob = NULL;
422
423 if (blob_id != 0) {
424 new_blob = drm_property_lookup_blob(dev, blob_id);
425 if (new_blob == NULL)
426 return -EINVAL;
427
428 if (expected_size > 0 && expected_size != new_blob->length) {
429 drm_property_blob_put(new_blob);
430 return -EINVAL;
431 }
432 }
433
434 *replaced |= drm_property_replace_blob(blob, new_blob);
435 drm_property_blob_put(new_blob);
436
437 return 0;
438 }
439
440 /**
441 * drm_atomic_crtc_set_property - set property on CRTC
442 * @crtc: the drm CRTC to set a property on
443 * @state: the state object to update with the new property value
444 * @property: the property to set
445 * @val: the new property value
446 *
447 * This function handles generic/core properties and calls out to driver's
448 * &drm_crtc_funcs.atomic_set_property for driver properties. To ensure
449 * consistent behavior you must call this function rather than the driver hook
450 * directly.
451 *
452 * RETURNS:
453 * Zero on success, error code on failure
454 */
455 int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
456 struct drm_crtc_state *state, struct drm_property *property,
457 uint64_t val)
458 {
459 struct drm_device *dev = crtc->dev;
460 struct drm_mode_config *config = &dev->mode_config;
461 bool replaced = false;
462 int ret;
463
464 if (property == config->prop_active)
465 state->active = val;
466 else if (property == config->prop_mode_id) {
467 struct drm_property_blob *mode =
468 drm_property_lookup_blob(dev, val);
469 ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
470 drm_property_blob_put(mode);
471 return ret;
472 } else if (property == config->degamma_lut_property) {
473 ret = drm_atomic_replace_property_blob_from_id(dev,
474 &state->degamma_lut,
475 val,
476 -1,
477 &replaced);
478 state->color_mgmt_changed |= replaced;
479 return ret;
480 } else if (property == config->ctm_property) {
481 ret = drm_atomic_replace_property_blob_from_id(dev,
482 &state->ctm,
483 val,
484 sizeof(struct drm_color_ctm),
485 &replaced);
486 state->color_mgmt_changed |= replaced;
487 return ret;
488 } else if (property == config->gamma_lut_property) {
489 ret = drm_atomic_replace_property_blob_from_id(dev,
490 &state->gamma_lut,
491 val,
492 -1,
493 &replaced);
494 state->color_mgmt_changed |= replaced;
495 return ret;
496 } else if (property == config->prop_out_fence_ptr) {
497 s32 __user *fence_ptr = u64_to_user_ptr(val);
498
499 if (!fence_ptr)
500 return 0;
501
502 if (put_user(-1, fence_ptr))
503 return -EFAULT;
504
505 set_out_fence_for_crtc(state->state, crtc, fence_ptr);
506 } else if (crtc->funcs->atomic_set_property)
507 return crtc->funcs->atomic_set_property(crtc, state, property, val);
508 else
509 return -EINVAL;
510
511 return 0;
512 }
513 EXPORT_SYMBOL(drm_atomic_crtc_set_property);
514
515 /**
516 * drm_atomic_crtc_get_property - get property value from CRTC state
518  * @crtc: the drm CRTC to get a property value from
519  * @state: the state object to get the property value from
520  * @property: the property to get
520 * @val: return location for the property value
521 *
522 * This function handles generic/core properties and calls out to driver's
523 * &drm_crtc_funcs.atomic_get_property for driver properties. To ensure
524 * consistent behavior you must call this function rather than the driver hook
525 * directly.
526 *
527 * RETURNS:
528 * Zero on success, error code on failure
529 */
530 static int
531 drm_atomic_crtc_get_property(struct drm_crtc *crtc,
532 const struct drm_crtc_state *state,
533 struct drm_property *property, uint64_t *val)
534 {
535 struct drm_device *dev = crtc->dev;
536 struct drm_mode_config *config = &dev->mode_config;
537
538 if (property == config->prop_active)
539 *val = state->active;
540 else if (property == config->prop_mode_id)
541 *val = (state->mode_blob) ? state->mode_blob->base.id : 0;
542 else if (property == config->degamma_lut_property)
543 *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
544 else if (property == config->ctm_property)
545 *val = (state->ctm) ? state->ctm->base.id : 0;
546 else if (property == config->gamma_lut_property)
547 *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
548 else if (property == config->prop_out_fence_ptr)
549 *val = 0;
550 else if (crtc->funcs->atomic_get_property)
551 return crtc->funcs->atomic_get_property(crtc, state, property, val);
552 else
553 return -EINVAL;
554
555 return 0;
556 }
557
558 /**
559 * drm_atomic_crtc_check - check crtc state
560 * @crtc: crtc to check
561 * @state: crtc state to check
562 *
563 * Provides core sanity checks for crtc state.
564 *
565 * RETURNS:
566 * Zero on success, error code on failure
567 */
568 static int drm_atomic_crtc_check(struct drm_crtc *crtc,
569 struct drm_crtc_state *state)
570 {
571 	/* NOTE: we explicitly don't enforce constraints such as the primary
572 	 * layer covering the entire screen, since that is something we want
573 	 * to allow (on hw that supports it). For hw that does not, it
574 	 * should be checked in the driver's crtc->atomic_check() vfunc.
575 *
576 * TODO: Add generic modeset state checks once we support those.
577 */
578
579 if (state->active && !state->enable) {
580 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
581 crtc->base.id, crtc->name);
582 return -EINVAL;
583 }
584
585 /* The state->enable vs. state->mode_blob checks can be WARN_ON,
586 * as this is a kernel-internal detail that userspace should never
587 * be able to trigger. */
588 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
589 WARN_ON(state->enable && !state->mode_blob)) {
590 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
591 crtc->base.id, crtc->name);
592 return -EINVAL;
593 }
594
595 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
596 WARN_ON(!state->enable && state->mode_blob)) {
597 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
598 crtc->base.id, crtc->name);
599 return -EINVAL;
600 }
601
602 /*
603 	 * Reject event generation when a CRTC is off and stays off.
604 	 * It wouldn't be hard to implement this, but userspace has a track
605 	 * record of happily burning through 100% cpu (or worse, crashing) when the
606 	 * display pipe is suspended. To avoid all that fun, just reject updates
607 	 * that ask for events, since that likely indicates a bug in the
608 	 * compositor's drawing loop. This is consistent with the vblank IOCTL
609 * and legacy page_flip IOCTL which also reject service on a disabled
610 * pipe.
611 */
612 if (state->event && !state->active && !crtc->state->active) {
613 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
614 crtc->base.id, crtc->name);
615 return -EINVAL;
616 }
617
618 return 0;
619 }
620
621 static void drm_atomic_crtc_print_state(struct drm_printer *p,
622 const struct drm_crtc_state *state)
623 {
624 struct drm_crtc *crtc = state->crtc;
625
626 drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
627 drm_printf(p, "\tenable=%d\n", state->enable);
628 drm_printf(p, "\tactive=%d\n", state->active);
629 drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
630 drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
631 drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
632 drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
633 drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
634 drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
635 drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
636 drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
637 drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));
638
639 if (crtc->funcs->atomic_print_state)
640 crtc->funcs->atomic_print_state(p, state);
641 }
642
643 /**
644 * drm_atomic_get_plane_state - get plane state
645 * @state: global atomic state object
646 * @plane: plane to get state object for
647 *
648 * This function returns the plane state for the given plane, allocating it if
649 * needed. It will also grab the relevant plane lock to make sure that the state
650 * is consistent.
651 *
652 * Returns:
653 *
654 * Either the allocated state or the error code encoded into the pointer. When
655 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
656 * entire atomic sequence must be restarted. All other errors are fatal.
657 */
658 struct drm_plane_state *
659 drm_atomic_get_plane_state(struct drm_atomic_state *state,
660 struct drm_plane *plane)
661 {
662 int ret, index = drm_plane_index(plane);
663 struct drm_plane_state *plane_state;
664
665 WARN_ON(!state->acquire_ctx);
666
667 plane_state = drm_atomic_get_existing_plane_state(state, plane);
668 if (plane_state)
669 return plane_state;
670
671 ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
672 if (ret)
673 return ERR_PTR(ret);
674
675 plane_state = plane->funcs->atomic_duplicate_state(plane);
676 if (!plane_state)
677 return ERR_PTR(-ENOMEM);
678
679 state->planes[index].state = plane_state;
680 state->planes[index].ptr = plane;
681 state->planes[index].old_state = plane->state;
682 state->planes[index].new_state = plane_state;
683 plane_state->state = state;
684
685 DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
686 plane->base.id, plane->name, plane_state, state);
687
688 if (plane_state->crtc) {
689 struct drm_crtc_state *crtc_state;
690
691 crtc_state = drm_atomic_get_crtc_state(state,
692 plane_state->crtc);
693 if (IS_ERR(crtc_state))
694 return ERR_CAST(crtc_state);
695 }
696
697 return plane_state;
698 }
699 EXPORT_SYMBOL(drm_atomic_get_plane_state);
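
/*
 * Illustrative sketch: acquiring a plane state; if the plane is currently on
 * a CRTC, that CRTC's state (and lock) is pulled in as well, as noted above.
 *
 *	plane_state = drm_atomic_get_plane_state(state, plane);
 *	if (IS_ERR(plane_state))
 *		return PTR_ERR(plane_state);
 *	plane_state->rotation = DRM_MODE_ROTATE_0;
 */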
700
701 /**
702 * drm_atomic_plane_set_property - set property on plane
703 * @plane: the drm plane to set a property on
704 * @state: the state object to update with the new property value
705 * @property: the property to set
706 * @val: the new property value
707 *
708 * This function handles generic/core properties and calls out to driver's
709 * &drm_plane_funcs.atomic_set_property for driver properties. To ensure
710 * consistent behavior you must call this function rather than the driver hook
711 * directly.
712 *
713 * RETURNS:
714 * Zero on success, error code on failure
715 */
716 static int drm_atomic_plane_set_property(struct drm_plane *plane,
717 struct drm_plane_state *state, struct drm_property *property,
718 uint64_t val)
719 {
720 struct drm_device *dev = plane->dev;
721 struct drm_mode_config *config = &dev->mode_config;
722
723 if (property == config->prop_fb_id) {
724 struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, val);
725 drm_atomic_set_fb_for_plane(state, fb);
726 if (fb)
727 drm_framebuffer_put(fb);
728 } else if (property == config->prop_in_fence_fd) {
729 if (state->fence)
730 return -EINVAL;
731
732 if (U642I64(val) == -1)
733 return 0;
734
735 state->fence = sync_file_get_fence(val);
736 if (!state->fence)
737 return -EINVAL;
738
739 } else if (property == config->prop_crtc_id) {
740 struct drm_crtc *crtc = drm_crtc_find(dev, val);
741 return drm_atomic_set_crtc_for_plane(state, crtc);
742 } else if (property == config->prop_crtc_x) {
743 state->crtc_x = U642I64(val);
744 } else if (property == config->prop_crtc_y) {
745 state->crtc_y = U642I64(val);
746 } else if (property == config->prop_crtc_w) {
747 state->crtc_w = val;
748 } else if (property == config->prop_crtc_h) {
749 state->crtc_h = val;
750 } else if (property == config->prop_src_x) {
751 state->src_x = val;
752 } else if (property == config->prop_src_y) {
753 state->src_y = val;
754 } else if (property == config->prop_src_w) {
755 state->src_w = val;
756 } else if (property == config->prop_src_h) {
757 state->src_h = val;
758 } else if (property == plane->rotation_property) {
759 if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK))
760 return -EINVAL;
761 state->rotation = val;
762 } else if (property == plane->zpos_property) {
763 state->zpos = val;
764 } else if (plane->funcs->atomic_set_property) {
765 return plane->funcs->atomic_set_property(plane, state,
766 property, val);
767 } else {
768 return -EINVAL;
769 }
770
771 return 0;
772 }
773
774 /**
775 * drm_atomic_plane_get_property - get property value from plane state
776  * @plane: the drm plane to get a property value from
777  * @state: the state object to get the property value from
778  * @property: the property to get
779 * @val: return location for the property value
780 *
781 * This function handles generic/core properties and calls out to driver's
782 * &drm_plane_funcs.atomic_get_property for driver properties. To ensure
783 * consistent behavior you must call this function rather than the driver hook
784 * directly.
785 *
786 * RETURNS:
787 * Zero on success, error code on failure
788 */
789 static int
790 drm_atomic_plane_get_property(struct drm_plane *plane,
791 const struct drm_plane_state *state,
792 struct drm_property *property, uint64_t *val)
793 {
794 struct drm_device *dev = plane->dev;
795 struct drm_mode_config *config = &dev->mode_config;
796
797 if (property == config->prop_fb_id) {
798 *val = (state->fb) ? state->fb->base.id : 0;
799 } else if (property == config->prop_in_fence_fd) {
800 *val = -1;
801 } else if (property == config->prop_crtc_id) {
802 *val = (state->crtc) ? state->crtc->base.id : 0;
803 } else if (property == config->prop_crtc_x) {
804 *val = I642U64(state->crtc_x);
805 } else if (property == config->prop_crtc_y) {
806 *val = I642U64(state->crtc_y);
807 } else if (property == config->prop_crtc_w) {
808 *val = state->crtc_w;
809 } else if (property == config->prop_crtc_h) {
810 *val = state->crtc_h;
811 } else if (property == config->prop_src_x) {
812 *val = state->src_x;
813 } else if (property == config->prop_src_y) {
814 *val = state->src_y;
815 } else if (property == config->prop_src_w) {
816 *val = state->src_w;
817 } else if (property == config->prop_src_h) {
818 *val = state->src_h;
819 } else if (property == plane->rotation_property) {
820 *val = state->rotation;
821 } else if (property == plane->zpos_property) {
822 *val = state->zpos;
823 } else if (plane->funcs->atomic_get_property) {
824 return plane->funcs->atomic_get_property(plane, state, property, val);
825 } else {
826 return -EINVAL;
827 }
828
829 return 0;
830 }
831
832 static bool
833 plane_switching_crtc(struct drm_atomic_state *state,
834 struct drm_plane *plane,
835 struct drm_plane_state *plane_state)
836 {
837 if (!plane->state->crtc || !plane_state->crtc)
838 return false;
839
840 if (plane->state->crtc == plane_state->crtc)
841 return false;
842
843 /* This could be refined, but currently there's no helper or driver code
844 	 * to implement direct switching of active planes, nor userspace to take
845 * advantage of more direct plane switching without the intermediate
846 * full OFF state.
847 */
848 return true;
849 }
850
851 /**
852 * drm_atomic_plane_check - check plane state
853 * @plane: plane to check
854 * @state: plane state to check
855 *
856 * Provides core sanity checks for plane state.
857 *
858 * RETURNS:
859 * Zero on success, error code on failure
860 */
861 static int drm_atomic_plane_check(struct drm_plane *plane,
862 struct drm_plane_state *state)
863 {
864 unsigned int fb_width, fb_height;
865 int ret;
866
867 /* either *both* CRTC and FB must be set, or neither */
868 if (WARN_ON(state->crtc && !state->fb)) {
869 DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
870 return -EINVAL;
871 } else if (WARN_ON(state->fb && !state->crtc)) {
872 DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
873 return -EINVAL;
874 }
875
876 /* if disabled, we don't care about the rest of the state: */
877 if (!state->crtc)
878 return 0;
879
880 /* Check whether this plane is usable on this CRTC */
881 if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
882 DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
883 return -EINVAL;
884 }
885
886 /* Check whether this plane supports the fb pixel format. */
887 ret = drm_plane_check_pixel_format(plane, state->fb->format->format);
888 if (ret) {
889 struct drm_format_name_buf format_name;
890 DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
891 drm_get_format_name(state->fb->format->format,
892 &format_name));
893 return ret;
894 }
895
896 /* Give drivers some help against integer overflows */
897 if (state->crtc_w > INT_MAX ||
898 state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
899 state->crtc_h > INT_MAX ||
900 state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
901 DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
902 state->crtc_w, state->crtc_h,
903 state->crtc_x, state->crtc_y);
904 return -ERANGE;
905 }
906
907 fb_width = state->fb->width << 16;
908 fb_height = state->fb->height << 16;
909
910 /* Make sure source coordinates are inside the fb. */
911 if (state->src_w > fb_width ||
912 state->src_x > fb_width - state->src_w ||
913 state->src_h > fb_height ||
914 state->src_y > fb_height - state->src_h) {
915 DRM_DEBUG_ATOMIC("Invalid source coordinates "
916 "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
917 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
918 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
919 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
920 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
921 return -ENOSPC;
922 }
923
924 if (plane_switching_crtc(state->state, plane, state)) {
925 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
926 plane->base.id, plane->name);
927 return -EINVAL;
928 }
929
930 return 0;
931 }
932
933 static void drm_atomic_plane_print_state(struct drm_printer *p,
934 const struct drm_plane_state *state)
935 {
936 struct drm_plane *plane = state->plane;
937 struct drm_rect src = drm_plane_state_src(state);
938 struct drm_rect dest = drm_plane_state_dest(state);
939
940 drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
941 drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
942 drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
943 if (state->fb) {
944 struct drm_framebuffer *fb = state->fb;
945 int i, n = fb->format->num_planes;
946 struct drm_format_name_buf format_name;
947
948 drm_printf(p, "\t\tformat=%s\n",
949 drm_get_format_name(fb->format->format, &format_name));
950 drm_printf(p, "\t\t\tmodifier=0x%llx\n", fb->modifier);
951 drm_printf(p, "\t\tsize=%dx%d\n", fb->width, fb->height);
952 drm_printf(p, "\t\tlayers:\n");
953 for (i = 0; i < n; i++) {
954 drm_printf(p, "\t\t\tpitch[%d]=%u\n", i, fb->pitches[i]);
955 drm_printf(p, "\t\t\toffset[%d]=%u\n", i, fb->offsets[i]);
956 }
957 }
958 drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
959 drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
960 drm_printf(p, "\trotation=%x\n", state->rotation);
961
962 if (plane->funcs->atomic_print_state)
963 plane->funcs->atomic_print_state(p, state);
964 }
965
966 /**
967 * drm_atomic_private_obj_init - initialize private object
968 * @obj: private object
969 * @state: initial private object state
970 * @funcs: pointer to the struct of function pointers that identify the object
971 * type
972 *
973 * Initialize the private object, which can be embedded into any
974 * driver private object that needs its own atomic state.
975 */
976 void
977 drm_atomic_private_obj_init(struct drm_private_obj *obj,
978 struct drm_private_state *state,
979 const struct drm_private_state_funcs *funcs)
980 {
981 memset(obj, 0, sizeof(*obj));
982
983 obj->state = state;
984 obj->funcs = funcs;
985 }
986 EXPORT_SYMBOL(drm_atomic_private_obj_init);
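
/*
 * Illustrative sketch: the duplicate/destroy hooks a driver typically
 * supplies for a private object. foo_state, to_foo_state() (a container_of()
 * wrapper) and foo_state_funcs are hypothetical names.
 *
 *	struct foo_state {
 *		struct drm_private_state base;
 *		int foo_setting;
 *	};
 *
 *	static struct drm_private_state *
 *	foo_duplicate_state(struct drm_private_obj *obj)
 *	{
 *		struct foo_state *state;
 *
 *		state = kmemdup(to_foo_state(obj->state), sizeof(*state),
 *				GFP_KERNEL);
 *		return state ? &state->base : NULL;
 *	}
 *
 *	static void foo_destroy_state(struct drm_private_obj *obj,
 *				      struct drm_private_state *state)
 *	{
 *		kfree(to_foo_state(state));
 *	}
 *
 *	static const struct drm_private_state_funcs foo_state_funcs = {
 *		.atomic_duplicate_state = foo_duplicate_state,
 *		.atomic_destroy_state = foo_destroy_state,
 *	};
 */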
987
988 /**
989 * drm_atomic_private_obj_fini - finalize private object
990 * @obj: private object
991 *
992 * Finalize the private object.
993 */
994 void
995 drm_atomic_private_obj_fini(struct drm_private_obj *obj)
996 {
997 obj->funcs->atomic_destroy_state(obj, obj->state);
998 }
999 EXPORT_SYMBOL(drm_atomic_private_obj_fini);
1000
1001 /**
1002 * drm_atomic_get_private_obj_state - get private object state
1003 * @state: global atomic state
1004 * @obj: private object to get the state for
1005 *
1006 * This function returns the private object state for the given private object,
1007  * allocating the state if needed. It does not grab any locks as the caller is
1008  * expected to take care of any required locking.
1009 *
1010 * RETURNS:
1011 *
1012 * Either the allocated state or the error code encoded into a pointer.
1013 */
1014 struct drm_private_state *
1015 drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
1016 struct drm_private_obj *obj)
1017 {
1018 int index, num_objs, i;
1019 size_t size;
1020 struct __drm_private_objs_state *arr;
1021 struct drm_private_state *obj_state;
1022
1023 for (i = 0; i < state->num_private_objs; i++)
1024 if (obj == state->private_objs[i].ptr)
1025 return state->private_objs[i].state;
1026
1027 num_objs = state->num_private_objs + 1;
1028 size = sizeof(*state->private_objs) * num_objs;
1029 arr = krealloc(state->private_objs, size, GFP_KERNEL);
1030 if (!arr)
1031 return ERR_PTR(-ENOMEM);
1032
1033 state->private_objs = arr;
1034 index = state->num_private_objs;
1035 memset(&state->private_objs[index], 0, sizeof(*state->private_objs));
1036
1037 obj_state = obj->funcs->atomic_duplicate_state(obj);
1038 if (!obj_state)
1039 return ERR_PTR(-ENOMEM);
1040
1041 state->private_objs[index].state = obj_state;
1042 state->private_objs[index].old_state = obj->state;
1043 state->private_objs[index].new_state = obj_state;
1044 state->private_objs[index].ptr = obj;
1045
1046 state->num_private_objs = num_objs;
1047
1048 DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n",
1049 obj, obj_state, state);
1050
1051 return obj_state;
1052 }
1053 EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
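
/*
 * Illustrative sketch: looking up a private object's state from an atomic
 * update; &foo->obj is a hypothetical drm_private_obj embedded in a driver
 * structure, and the caller must already hold whatever lock protects it.
 *
 *	priv_state = drm_atomic_get_private_obj_state(state, &foo->obj);
 *	if (IS_ERR(priv_state))
 *		return PTR_ERR(priv_state);
 */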
1054
1055 /**
1056 * drm_atomic_get_connector_state - get connector state
1057 * @state: global atomic state object
1058 * @connector: connector to get state object for
1059 *
1060 * This function returns the connector state for the given connector,
1061 * allocating it if needed. It will also grab the relevant connector lock to
1062 * make sure that the state is consistent.
1063 *
1064 * Returns:
1065 *
1066 * Either the allocated state or the error code encoded into the pointer. When
1067 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
1068 * entire atomic sequence must be restarted. All other errors are fatal.
1069 */
1070 struct drm_connector_state *
1071 drm_atomic_get_connector_state(struct drm_atomic_state *state,
1072 struct drm_connector *connector)
1073 {
1074 int ret, index;
1075 struct drm_mode_config *config = &connector->dev->mode_config;
1076 struct drm_connector_state *connector_state;
1077
1078 WARN_ON(!state->acquire_ctx);
1079
1080 ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
1081 if (ret)
1082 return ERR_PTR(ret);
1083
1084 index = drm_connector_index(connector);
1085
1086 if (index >= state->num_connector) {
1087 struct __drm_connnectors_state *c;
1088 int alloc = max(index + 1, config->num_connector);
1089
1090 c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
1091 if (!c)
1092 return ERR_PTR(-ENOMEM);
1093
1094 state->connectors = c;
1095 memset(&state->connectors[state->num_connector], 0,
1096 sizeof(*state->connectors) * (alloc - state->num_connector));
1097
1098 state->num_connector = alloc;
1099 }
1100
1101 if (state->connectors[index].state)
1102 return state->connectors[index].state;
1103
1104 connector_state = connector->funcs->atomic_duplicate_state(connector);
1105 if (!connector_state)
1106 return ERR_PTR(-ENOMEM);
1107
1108 drm_connector_get(connector);
1109 state->connectors[index].state = connector_state;
1110 state->connectors[index].old_state = connector->state;
1111 state->connectors[index].new_state = connector_state;
1112 state->connectors[index].ptr = connector;
1113 connector_state->state = state;
1114
1115 DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n",
1116 connector->base.id, connector->name,
1117 connector_state, state);
1118
1119 if (connector_state->crtc) {
1120 struct drm_crtc_state *crtc_state;
1121
1122 crtc_state = drm_atomic_get_crtc_state(state,
1123 connector_state->crtc);
1124 if (IS_ERR(crtc_state))
1125 return ERR_CAST(crtc_state);
1126 }
1127
1128 return connector_state;
1129 }
1130 EXPORT_SYMBOL(drm_atomic_get_connector_state);
1131
1132 /**
1133 * drm_atomic_connector_set_property - set property on connector.
1134 * @connector: the drm connector to set a property on
1135 * @state: the state object to update with the new property value
1136 * @property: the property to set
1137 * @val: the new property value
1138 *
1139 * This function handles generic/core properties and calls out to driver's
1140 * &drm_connector_funcs.atomic_set_property for driver properties. To ensure
1141 * consistent behavior you must call this function rather than the driver hook
1142 * directly.
1143 *
1144 * RETURNS:
1145 * Zero on success, error code on failure
1146 */
1147 static int drm_atomic_connector_set_property(struct drm_connector *connector,
1148 struct drm_connector_state *state, struct drm_property *property,
1149 uint64_t val)
1150 {
1151 struct drm_device *dev = connector->dev;
1152 struct drm_mode_config *config = &dev->mode_config;
1153
1154 if (property == config->prop_crtc_id) {
1155 struct drm_crtc *crtc = drm_crtc_find(dev, val);
1156 return drm_atomic_set_crtc_for_connector(state, crtc);
1157 } else if (property == config->dpms_property) {
1158 		/* setting the DPMS property requires special handling, which
1159 		 * is done in the legacy setprop path for us. Disallow (for
1160 		 * now?) atomic writes to the DPMS property:
1161 		 */
1162 return -EINVAL;
1163 } else if (property == config->tv_select_subconnector_property) {
1164 state->tv.subconnector = val;
1165 } else if (property == config->tv_left_margin_property) {
1166 state->tv.margins.left = val;
1167 } else if (property == config->tv_right_margin_property) {
1168 state->tv.margins.right = val;
1169 } else if (property == config->tv_top_margin_property) {
1170 state->tv.margins.top = val;
1171 } else if (property == config->tv_bottom_margin_property) {
1172 state->tv.margins.bottom = val;
1173 } else if (property == config->tv_mode_property) {
1174 state->tv.mode = val;
1175 } else if (property == config->tv_brightness_property) {
1176 state->tv.brightness = val;
1177 } else if (property == config->tv_contrast_property) {
1178 state->tv.contrast = val;
1179 } else if (property == config->tv_flicker_reduction_property) {
1180 state->tv.flicker_reduction = val;
1181 } else if (property == config->tv_overscan_property) {
1182 state->tv.overscan = val;
1183 } else if (property == config->tv_saturation_property) {
1184 state->tv.saturation = val;
1185 } else if (property == config->tv_hue_property) {
1186 state->tv.hue = val;
1187 } else if (property == config->link_status_property) {
1188 /* Never downgrade from GOOD to BAD on userspace's request here,
1189 * only hw issues can do that.
1190 *
1191 		 * For an atomic property userspace doesn't need to be able
1192 		 * to understand all the properties, but it needs to be able to
1193 		 * restore the state it wants on VT switch. So if userspace
1194 		 * tries to change the link_status from GOOD to BAD, the driver
1195 		 * silently rejects it and returns 0. This prevents userspace
1196 		 * from accidentally breaking the display when it restores the
1197 * state.
1198 */
1199 if (state->link_status != DRM_LINK_STATUS_GOOD)
1200 state->link_status = val;
1201 } else if (property == config->aspect_ratio_property) {
1202 state->picture_aspect_ratio = val;
1203 } else if (property == connector->scaling_mode_property) {
1204 state->scaling_mode = val;
1205 } else if (connector->funcs->atomic_set_property) {
1206 return connector->funcs->atomic_set_property(connector,
1207 state, property, val);
1208 } else {
1209 return -EINVAL;
1210 }
1211
1212 return 0;
1213 }
1214
1215 static void drm_atomic_connector_print_state(struct drm_printer *p,
1216 const struct drm_connector_state *state)
1217 {
1218 struct drm_connector *connector = state->connector;
1219
1220 drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
1221 drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
1222
1223 if (connector->funcs->atomic_print_state)
1224 connector->funcs->atomic_print_state(p, state);
1225 }
1226
1227 /**
1228 * drm_atomic_connector_get_property - get property value from connector state
1229  * @connector: the drm connector to get a property value from
1230  * @state: the state object to get the property value from
1231  * @property: the property to get
1232 * @val: return location for the property value
1233 *
1234 * This function handles generic/core properties and calls out to driver's
1235 * &drm_connector_funcs.atomic_get_property for driver properties. To ensure
1236 * consistent behavior you must call this function rather than the driver hook
1237 * directly.
1238 *
1239 * RETURNS:
1240 * Zero on success, error code on failure
1241 */
1242 static int
1243 drm_atomic_connector_get_property(struct drm_connector *connector,
1244 const struct drm_connector_state *state,
1245 struct drm_property *property, uint64_t *val)
1246 {
1247 struct drm_device *dev = connector->dev;
1248 struct drm_mode_config *config = &dev->mode_config;
1249
1250 if (property == config->prop_crtc_id) {
1251 *val = (state->crtc) ? state->crtc->base.id : 0;
1252 } else if (property == config->dpms_property) {
1253 *val = connector->dpms;
1254 } else if (property == config->tv_select_subconnector_property) {
1255 *val = state->tv.subconnector;
1256 } else if (property == config->tv_left_margin_property) {
1257 *val = state->tv.margins.left;
1258 } else if (property == config->tv_right_margin_property) {
1259 *val = state->tv.margins.right;
1260 } else if (property == config->tv_top_margin_property) {
1261 *val = state->tv.margins.top;
1262 } else if (property == config->tv_bottom_margin_property) {
1263 *val = state->tv.margins.bottom;
1264 } else if (property == config->tv_mode_property) {
1265 *val = state->tv.mode;
1266 } else if (property == config->tv_brightness_property) {
1267 *val = state->tv.brightness;
1268 } else if (property == config->tv_contrast_property) {
1269 *val = state->tv.contrast;
1270 } else if (property == config->tv_flicker_reduction_property) {
1271 *val = state->tv.flicker_reduction;
1272 } else if (property == config->tv_overscan_property) {
1273 *val = state->tv.overscan;
1274 } else if (property == config->tv_saturation_property) {
1275 *val = state->tv.saturation;
1276 } else if (property == config->tv_hue_property) {
1277 *val = state->tv.hue;
1278 } else if (property == config->link_status_property) {
1279 *val = state->link_status;
1280 } else if (property == config->aspect_ratio_property) {
1281 *val = state->picture_aspect_ratio;
1282 } else if (property == connector->scaling_mode_property) {
1283 *val = state->scaling_mode;
1284 } else if (connector->funcs->atomic_get_property) {
1285 return connector->funcs->atomic_get_property(connector,
1286 state, property, val);
1287 } else {
1288 return -EINVAL;
1289 }
1290
1291 return 0;
1292 }
1293
1294 int drm_atomic_get_property(struct drm_mode_object *obj,
1295 struct drm_property *property, uint64_t *val)
1296 {
1297 struct drm_device *dev = property->dev;
1298 int ret;
1299
1300 switch (obj->type) {
1301 case DRM_MODE_OBJECT_CONNECTOR: {
1302 struct drm_connector *connector = obj_to_connector(obj);
1303 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
1304 ret = drm_atomic_connector_get_property(connector,
1305 connector->state, property, val);
1306 break;
1307 }
1308 case DRM_MODE_OBJECT_CRTC: {
1309 struct drm_crtc *crtc = obj_to_crtc(obj);
1310 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
1311 ret = drm_atomic_crtc_get_property(crtc,
1312 crtc->state, property, val);
1313 break;
1314 }
1315 case DRM_MODE_OBJECT_PLANE: {
1316 struct drm_plane *plane = obj_to_plane(obj);
1317 WARN_ON(!drm_modeset_is_locked(&plane->mutex));
1318 ret = drm_atomic_plane_get_property(plane,
1319 plane->state, property, val);
1320 break;
1321 }
1322 default:
1323 ret = -EINVAL;
1324 break;
1325 }
1326
1327 return ret;
1328 }
1329
1330 /**
1331 * drm_atomic_set_crtc_for_plane - set crtc for plane
1332 * @plane_state: the plane whose incoming state to update
1333 * @crtc: crtc to use for the plane
1334 *
1335 * Changing the assigned crtc for a plane requires us to grab the lock and state
1336 * for the new crtc, as needed. This function takes care of all these details
1337 * besides updating the pointer in the state object itself.
1338 *
1339 * Returns:
1340 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1341 * then the w/w mutex code has detected a deadlock and the entire atomic
1342 * sequence must be restarted. All other errors are fatal.
1343 */
1344 int
1345 drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
1346 struct drm_crtc *crtc)
1347 {
1348 struct drm_plane *plane = plane_state->plane;
1349 struct drm_crtc_state *crtc_state;
1350
1351 if (plane_state->crtc) {
1352 crtc_state = drm_atomic_get_crtc_state(plane_state->state,
1353 plane_state->crtc);
1354 if (WARN_ON(IS_ERR(crtc_state)))
1355 return PTR_ERR(crtc_state);
1356
1357 crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
1358 }
1359
1360 plane_state->crtc = crtc;
1361
1362 if (crtc) {
1363 crtc_state = drm_atomic_get_crtc_state(plane_state->state,
1364 crtc);
1365 if (IS_ERR(crtc_state))
1366 return PTR_ERR(crtc_state);
1367 crtc_state->plane_mask |= (1 << drm_plane_index(plane));
1368 }
1369
1370 if (crtc)
1371 DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
1372 plane_state, crtc->base.id, crtc->name);
1373 else
1374 DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
1375 plane_state);
1376
1377 return 0;
1378 }
1379 EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
1380
1381 /**
1382 * drm_atomic_set_fb_for_plane - set framebuffer for plane
1383 * @plane_state: atomic state object for the plane
1384 * @fb: fb to use for the plane
1385 *
1386 * Changing the assigned framebuffer for a plane requires us to grab a reference
1387 * to the new fb and drop the reference to the old fb, if there is one. This
1388 * function takes care of all these details besides updating the pointer in the
1389 * state object itself.
1390 */
1391 void
1392 drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
1393 struct drm_framebuffer *fb)
1394 {
1395 if (fb)
1396 DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
1397 fb->base.id, plane_state);
1398 else
1399 DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
1400 plane_state);
1401
1402 drm_framebuffer_assign(&plane_state->fb, fb);
1403 }
1404 EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
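
/*
 * Illustrative sketch: wiring a plane to a CRTC and a framebuffer in one
 * update, roughly what the legacy plane update paths do internally; source
 * and destination coordinates are omitted for brevity.
 *
 *	plane_state = drm_atomic_get_plane_state(state, plane);
 *	if (IS_ERR(plane_state))
 *		return PTR_ERR(plane_state);
 *	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
 *	if (ret)
 *		return ret;
 *	drm_atomic_set_fb_for_plane(plane_state, fb);
 */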
1405
1406 /**
1407 * drm_atomic_set_fence_for_plane - set fence for plane
1408 * @plane_state: atomic state object for the plane
1409 * @fence: dma_fence to use for the plane
1410 *
1411  * Helper to set up the plane_state fence in case it is not set yet.
1412  * By using this, drivers don't need to worry whether the user chose
1413  * implicit or explicit fencing.
1414  *
1415  * This function will not set the fence on the state if it was already set
1416  * via the explicit fencing interfaces on the atomic ioctl. In that case it
1417  * will drop the reference to the fence as we are not storing it anywhere.
1418  * Otherwise, if &drm_plane_state.fence is not set, this function just sets it
1419  * to the received implicit fence. In both cases this function consumes a
1420  * reference for @fence.
1421 */
1422 void
1423 drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
1424 struct dma_fence *fence)
1425 {
1426 if (plane_state->fence) {
1427 dma_fence_put(fence);
1428 return;
1429 }
1430
1431 plane_state->fence = fence;
1432 }
1433 EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
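
/*
 * Illustrative sketch: a driver's &drm_plane_helper_funcs.prepare_fb hook
 * handing the implicit fence of the new framebuffer's backing storage to the
 * plane state. foo_fb_get_excl_fence() is a made-up helper; how the fence is
 * looked up is driver specific, but it must return a reference that this
 * function can consume.
 *
 *	static int foo_plane_prepare_fb(struct drm_plane *plane,
 *					struct drm_plane_state *new_state)
 *	{
 *		struct dma_fence *fence;
 *
 *		if (!new_state->fb)
 *			return 0;
 *
 *		fence = foo_fb_get_excl_fence(new_state->fb);
 *		drm_atomic_set_fence_for_plane(new_state, fence);
 *		return 0;
 *	}
 */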
1434
1435 /**
1436 * drm_atomic_set_crtc_for_connector - set crtc for connector
1437 * @conn_state: atomic state object for the connector
1438 * @crtc: crtc to use for the connector
1439 *
1440 * Changing the assigned crtc for a connector requires us to grab the lock and
1441 * state for the new crtc, as needed. This function takes care of all these
1442 * details besides updating the pointer in the state object itself.
1443 *
1444 * Returns:
1445 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1446 * then the w/w mutex code has detected a deadlock and the entire atomic
1447 * sequence must be restarted. All other errors are fatal.
1448 */
1449 int
1450 drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
1451 struct drm_crtc *crtc)
1452 {
1453 struct drm_crtc_state *crtc_state;
1454
1455 if (conn_state->crtc == crtc)
1456 return 0;
1457
1458 if (conn_state->crtc) {
1459 crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
1460 conn_state->crtc);
1461
1462 crtc_state->connector_mask &=
1463 ~(1 << drm_connector_index(conn_state->connector));
1464
1465 drm_connector_put(conn_state->connector);
1466 conn_state->crtc = NULL;
1467 }
1468
1469 if (crtc) {
1470 crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
1471 if (IS_ERR(crtc_state))
1472 return PTR_ERR(crtc_state);
1473
1474 crtc_state->connector_mask |=
1475 1 << drm_connector_index(conn_state->connector);
1476
1477 drm_connector_get(conn_state->connector);
1478 conn_state->crtc = crtc;
1479
1480 DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
1481 conn_state, crtc->base.id, crtc->name);
1482 } else {
1483 DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
1484 conn_state);
1485 }
1486
1487 return 0;
1488 }
1489 EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
1490
1491 /**
1492 * drm_atomic_add_affected_connectors - add connectors for crtc
1493 * @state: atomic state
1494 * @crtc: DRM crtc
1495 *
1496 * This function walks the current configuration and adds all connectors
1497 * currently using @crtc to the atomic configuration @state. Note that this
1498 * function must acquire the connection mutex. This can potentially cause
1499  * unneeded serialization if the update is just for the planes on one crtc. Hence
1500 * drivers and helpers should only call this when really needed (e.g. when a
1501 * full modeset needs to happen due to some change).
1502 *
1503 * Returns:
1504 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1505 * then the w/w mutex code has detected a deadlock and the entire atomic
1506 * sequence must be restarted. All other errors are fatal.
1507 */
1508 int
1509 drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
1510 struct drm_crtc *crtc)
1511 {
1512 struct drm_mode_config *config = &state->dev->mode_config;
1513 struct drm_connector *connector;
1514 struct drm_connector_state *conn_state;
1515 struct drm_connector_list_iter conn_iter;
1516 struct drm_crtc_state *crtc_state;
1517 int ret;
1518
1519 crtc_state = drm_atomic_get_crtc_state(state, crtc);
1520 if (IS_ERR(crtc_state))
1521 return PTR_ERR(crtc_state);
1522
1523 ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
1524 if (ret)
1525 return ret;
1526
1527 DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
1528 crtc->base.id, crtc->name, state);
1529
1530 /*
1531 * Changed connectors are already in @state, so only need to look
1532 * at the connector_mask in crtc_state.
1533 */
1534 drm_connector_list_iter_begin(state->dev, &conn_iter);
1535 drm_for_each_connector_iter(connector, &conn_iter) {
1536 if (!(crtc_state->connector_mask & (1 << drm_connector_index(connector))))
1537 continue;
1538
1539 conn_state = drm_atomic_get_connector_state(state, connector);
1540 if (IS_ERR(conn_state)) {
1541 drm_connector_list_iter_end(&conn_iter);
1542 return PTR_ERR(conn_state);
1543 }
1544 }
1545 drm_connector_list_iter_end(&conn_iter);
1546
1547 return 0;
1548 }
1549 EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
1550
1551 /**
1552 * drm_atomic_add_affected_planes - add planes for crtc
1553 * @state: atomic state
1554 * @crtc: DRM crtc
1555 *
1556 * This function walks the current configuration and adds all planes
1557 * currently used by @crtc to the atomic configuration @state. This is useful
1558  * when an atomic commit also needs to check all currently enabled planes on
1559 * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
1560 * to avoid special code to force-enable all planes.
1561 *
1562 * Since acquiring a plane state will always also acquire the w/w mutex of the
1563  * current CRTC for that plane (if there is any), adding all the plane states for
1564  * a CRTC will not reduce parallelism of atomic updates.
1565 *
1566 * Returns:
1567 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1568 * then the w/w mutex code has detected a deadlock and the entire atomic
1569 * sequence must be restarted. All other errors are fatal.
1570 */
1571 int
1572 drm_atomic_add_affected_planes(struct drm_atomic_state *state,
1573 struct drm_crtc *crtc)
1574 {
1575 struct drm_plane *plane;
1576
1577 WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
1578
1579 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
1580 struct drm_plane_state *plane_state =
1581 drm_atomic_get_plane_state(state, plane);
1582
1583 if (IS_ERR(plane_state))
1584 return PTR_ERR(plane_state);
1585 }
1586 return 0;
1587 }
1588 EXPORT_SYMBOL(drm_atomic_add_affected_planes);
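
/*
 * Illustrative sketch: pulling in every plane currently on a CRTC when a
 * modeset means they all have to be re-checked.
 *
 *	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
 *		ret = drm_atomic_add_affected_planes(state, crtc);
 *		if (ret)
 *			return ret;
 *	}
 */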
1589
1590 /**
1591 * drm_atomic_check_only - check whether a given config would work
1592 * @state: atomic configuration to check
1593 *
1594 * Note that this function can return -EDEADLK if the driver needed to acquire
1595 * more locks but encountered a deadlock. The caller must then do the usual w/w
1596 * backoff dance and restart. All other errors are fatal.
1597 *
1598 * Returns:
1599 * 0 on success, negative error code on failure.
1600 */
1601 int drm_atomic_check_only(struct drm_atomic_state *state)
1602 {
1603 struct drm_device *dev = state->dev;
1604 struct drm_mode_config *config = &dev->mode_config;
1605 struct drm_plane *plane;
1606 struct drm_plane_state *plane_state;
1607 struct drm_crtc *crtc;
1608 struct drm_crtc_state *crtc_state;
1609 int i, ret = 0;
1610
1611 DRM_DEBUG_ATOMIC("checking %p\n", state);
1612
1613 for_each_new_plane_in_state(state, plane, plane_state, i) {
1614 ret = drm_atomic_plane_check(plane, plane_state);
1615 if (ret) {
1616 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
1617 plane->base.id, plane->name);
1618 return ret;
1619 }
1620 }
1621
1622 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1623 ret = drm_atomic_crtc_check(crtc, crtc_state);
1624 if (ret) {
1625 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
1626 crtc->base.id, crtc->name);
1627 return ret;
1628 }
1629 }
1630
1631 if (config->funcs->atomic_check)
1632 ret = config->funcs->atomic_check(state->dev, state);
1633
1634 if (ret)
1635 return ret;
1636
1637 if (!state->allow_modeset) {
1638 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1639 if (drm_atomic_crtc_needs_modeset(crtc_state)) {
1640 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
1641 crtc->base.id, crtc->name);
1642 return -EINVAL;
1643 }
1644 }
1645 }
1646
1647 return 0;
1648 }
1649 EXPORT_SYMBOL(drm_atomic_check_only);
1650
1651 /**
1652 * drm_atomic_commit - commit configuration atomically
1653 * @state: atomic configuration to check
1654 *
1655 * Note that this function can return -EDEADLK if the driver needed to acquire
1656 * more locks but encountered a deadlock. The caller must then do the usual w/w
1657 * backoff dance and restart. All other errors are fatal.
1658 *
1659 * This function will take its own reference on @state.
1660 * Callers should always release their reference with drm_atomic_state_put().
1661 *
1662 * Returns:
1663 * 0 on success, negative error code on failure.
1664 */
1665 int drm_atomic_commit(struct drm_atomic_state *state)
1666 {
1667 struct drm_mode_config *config = &state->dev->mode_config;
1668 int ret;
1669
1670 ret = drm_atomic_check_only(state);
1671 if (ret)
1672 return ret;
1673
1674 DRM_DEBUG_ATOMIC("committing %p\n", state);
1675
1676 return config->funcs->atomic_commit(state->dev, state, false);
1677 }
1678 EXPORT_SYMBOL(drm_atomic_commit);
1679
1680 /**
1681 * drm_atomic_nonblocking_commit - atomic nonblocking commit
1682 * @state: atomic configuration to commit
1683 *
1684 * Note that this function can return -EDEADLK if the driver needed to acquire
1685 * more locks but encountered a deadlock. The caller must then do the usual w/w
1686 * backoff dance and restart. All other errors are fatal.
1687 *
1688 * This function will take its own reference on @state.
1689 * Callers should always release their reference with drm_atomic_state_put().
1690 *
1691 * Returns:
1692 * 0 on success, negative error code on failure.
1693 */
1694 int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
1695 {
1696 struct drm_mode_config *config = &state->dev->mode_config;
1697 int ret;
1698
1699 ret = drm_atomic_check_only(state);
1700 if (ret)
1701 return ret;
1702
1703 DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);
1704
1705 return config->funcs->atomic_commit(state->dev, state, true);
1706 }
1707 EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
1708
1709 static void drm_atomic_print_state(const struct drm_atomic_state *state)
1710 {
1711 struct drm_printer p = drm_info_printer(state->dev->dev);
1712 struct drm_plane *plane;
1713 struct drm_plane_state *plane_state;
1714 struct drm_crtc *crtc;
1715 struct drm_crtc_state *crtc_state;
1716 struct drm_connector *connector;
1717 struct drm_connector_state *connector_state;
1718 int i;
1719
1720 	DRM_DEBUG_ATOMIC("printing %p\n", state);
1721
1722 for_each_new_plane_in_state(state, plane, plane_state, i)
1723 drm_atomic_plane_print_state(&p, plane_state);
1724
1725 for_each_new_crtc_in_state(state, crtc, crtc_state, i)
1726 drm_atomic_crtc_print_state(&p, crtc_state);
1727
1728 for_each_new_connector_in_state(state, connector, connector_state, i)
1729 drm_atomic_connector_print_state(&p, connector_state);
1730 }
1731
1732 static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
1733 bool take_locks)
1734 {
1735 struct drm_mode_config *config = &dev->mode_config;
1736 struct drm_plane *plane;
1737 struct drm_crtc *crtc;
1738 struct drm_connector *connector;
1739 struct drm_connector_list_iter conn_iter;
1740
1741 if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
1742 return;
1743
1744 list_for_each_entry(plane, &config->plane_list, head) {
1745 if (take_locks)
1746 drm_modeset_lock(&plane->mutex, NULL);
1747 drm_atomic_plane_print_state(p, plane->state);
1748 if (take_locks)
1749 drm_modeset_unlock(&plane->mutex);
1750 }
1751
1752 list_for_each_entry(crtc, &config->crtc_list, head) {
1753 if (take_locks)
1754 drm_modeset_lock(&crtc->mutex, NULL);
1755 drm_atomic_crtc_print_state(p, crtc->state);
1756 if (take_locks)
1757 drm_modeset_unlock(&crtc->mutex);
1758 }
1759
1760 drm_connector_list_iter_begin(dev, &conn_iter);
1761 if (take_locks)
1762 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1763 drm_for_each_connector_iter(connector, &conn_iter)
1764 drm_atomic_connector_print_state(p, connector->state);
1765 if (take_locks)
1766 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1767 drm_connector_list_iter_end(&conn_iter);
1768 }
1769
1770 /**
1771 * drm_state_dump - dump entire device atomic state
1772 * @dev: the drm device
1773 * @p: where to print the state to
1774 *
1775 * Just for debugging. Drivers might want an option to dump state
1776 * to dmesg in case of error IRQs. (Hint: you probably want to
1777 * ratelimit this!)
1778 *
1779 * The caller must hold all modeset locks (drm_modeset_lock_all()), or,
1780 * if this is called from an error IRQ handler, the dump should not be
1781 * enabled by default. (I.e. if you are debugging errors you might not
1782 * care that this is racy, but calling this without all modeset locks
1783 * held is not inherently safe.)
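*
* A minimal ratelimited usage sketch (hypothetical driver error-IRQ code;
* the interval and burst values are arbitrary choices):
*
*     static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 1);
*     struct drm_printer p = drm_info_printer(dev->dev);
*
*     if (__ratelimit(&rs))
*             drm_state_dump(dev, &p);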
1784 */
1785 void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
1786 {
1787 __drm_state_dump(dev, p, false);
1788 }
1789 EXPORT_SYMBOL(drm_state_dump);
1790
1791 #ifdef CONFIG_DEBUG_FS
1792 static int drm_state_info(struct seq_file *m, void *data)
1793 {
1794 struct drm_info_node *node = (struct drm_info_node *) m->private;
1795 struct drm_device *dev = node->minor->dev;
1796 struct drm_printer p = drm_seq_file_printer(m);
1797
1798 __drm_state_dump(dev, &p, true);
1799
1800 return 0;
1801 }
1802
1803 /* any use in debugfs files to dump individual planes/crtc/etc? */
1804 static const struct drm_info_list drm_atomic_debugfs_list[] = {
1805 {"state", drm_state_info, 0},
1806 };
1807
1808 int drm_atomic_debugfs_init(struct drm_minor *minor)
1809 {
1810 return drm_debugfs_create_files(drm_atomic_debugfs_list,
1811 ARRAY_SIZE(drm_atomic_debugfs_list),
1812 minor->debugfs_root, minor);
1813 }
1814 #endif
1815
1816 /*
1817 * The big monster ioctl
1818 */
1819
1820 static struct drm_pending_vblank_event *create_vblank_event(
1821 struct drm_device *dev, uint64_t user_data)
1822 {
1823 struct drm_pending_vblank_event *e = NULL;
1824
1825 e = kzalloc(sizeof *e, GFP_KERNEL);
1826 if (!e)
1827 return NULL;
1828
1829 e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
1830 e->event.base.length = sizeof(e->event);
1831 e->event.user_data = user_data;
1832
1833 return e;
1834 }
1835
1836 int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
1837 struct drm_connector *connector,
1838 int mode)
1839 {
1840 struct drm_connector *tmp_connector;
1841 struct drm_connector_state *new_conn_state;
1842 struct drm_crtc *crtc;
1843 struct drm_crtc_state *crtc_state;
1844 int i, ret, old_mode = connector->dpms;
1845 bool active = false;
1846
1847 ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
1848 state->acquire_ctx);
1849 if (ret)
1850 return ret;
1851
1852 if (mode != DRM_MODE_DPMS_ON)
1853 mode = DRM_MODE_DPMS_OFF;
1854 connector->dpms = mode;
1855
1856 crtc = connector->state->crtc;
1857 if (!crtc)
1858 goto out;
1859 ret = drm_atomic_add_affected_connectors(state, crtc);
1860 if (ret)
1861 goto out;
1862
1863 crtc_state = drm_atomic_get_crtc_state(state, crtc);
1864 if (IS_ERR(crtc_state)) {
1865 ret = PTR_ERR(crtc_state);
1866 goto out;
1867 }
1868
1869 for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) {
1870 if (new_conn_state->crtc != crtc)
1871 continue;
1872 if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
1873 active = true;
1874 break;
1875 }
1876 }
1877
1878 crtc_state->active = active;
1879 ret = drm_atomic_commit(state);
1880 out:
1881 if (ret != 0)
1882 connector->dpms = old_mode;
1883 return ret;
1884 }
1885
1886 int drm_atomic_set_property(struct drm_atomic_state *state,
1887 struct drm_mode_object *obj,
1888 struct drm_property *prop,
1889 uint64_t prop_value)
1890 {
1891 struct drm_mode_object *ref;
1892 int ret;
1893
1894 if (!drm_property_change_valid_get(prop, prop_value, &ref))
1895 return -EINVAL;
1896
1897 switch (obj->type) {
1898 case DRM_MODE_OBJECT_CONNECTOR: {
1899 struct drm_connector *connector = obj_to_connector(obj);
1900 struct drm_connector_state *connector_state;
1901
1902 connector_state = drm_atomic_get_connector_state(state, connector);
1903 if (IS_ERR(connector_state)) {
1904 ret = PTR_ERR(connector_state);
1905 break;
1906 }
1907
1908 ret = drm_atomic_connector_set_property(connector,
1909 connector_state, prop, prop_value);
1910 break;
1911 }
1912 case DRM_MODE_OBJECT_CRTC: {
1913 struct drm_crtc *crtc = obj_to_crtc(obj);
1914 struct drm_crtc_state *crtc_state;
1915
1916 crtc_state = drm_atomic_get_crtc_state(state, crtc);
1917 if (IS_ERR(crtc_state)) {
1918 ret = PTR_ERR(crtc_state);
1919 break;
1920 }
1921
1922 ret = drm_atomic_crtc_set_property(crtc,
1923 crtc_state, prop, prop_value);
1924 break;
1925 }
1926 case DRM_MODE_OBJECT_PLANE: {
1927 struct drm_plane *plane = obj_to_plane(obj);
1928 struct drm_plane_state *plane_state;
1929
1930 plane_state = drm_atomic_get_plane_state(state, plane);
1931 if (IS_ERR(plane_state)) {
1932 ret = PTR_ERR(plane_state);
1933 break;
1934 }
1935
1936 ret = drm_atomic_plane_set_property(plane,
1937 plane_state, prop, prop_value);
1938 break;
1939 }
1940 default:
1941 ret = -EINVAL;
1942 break;
1943 }
1944
1945 drm_property_change_valid_put(prop, ref);
1946 return ret;
1947 }
1948
1949 /**
1950 * drm_atomic_clean_old_fb - Unset old_fb pointers and set plane->fb pointers.
1951 *
1952 * @dev: drm device to check.
1953 * @plane_mask: plane mask for planes that were updated.
1954 * @ret: return value, can be -EDEADLK for a retry.
1955 *
1956 * Before doing an update, &drm_plane.old_fb is set to &drm_plane.fb, but before
1957 * dropping the locks old_fb needs to be set to NULL and plane->fb updated. This
1958 * is a common operation for each atomic update, so this call is split off as a
1959 * helper.
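*
* A minimal usage sketch, mirroring the atomic ioctl below (hypothetical
* legacy-path code; locking and state setup omitted):
*
*     plane->old_fb = plane->fb;
*     plane_mask |= 1 << drm_plane_index(plane);
*     ...
*     ret = drm_atomic_commit(state);
*     drm_atomic_clean_old_fb(dev, plane_mask, ret);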
1960 */
1961 void drm_atomic_clean_old_fb(struct drm_device *dev,
1962 unsigned plane_mask,
1963 int ret)
1964 {
1965 struct drm_plane *plane;
1966
1967 /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
1968 * locks (ie. while it is still safe to deref plane->state). We
1969 * need to do this here because the driver entry points cannot
1970 * distinguish between legacy and atomic ioctls.
1971 */
1972 drm_for_each_plane_mask(plane, dev, plane_mask) {
1973 if (ret == 0) {
1974 struct drm_framebuffer *new_fb = plane->state->fb;
1975 if (new_fb)
1976 drm_framebuffer_get(new_fb);
1977 plane->fb = new_fb;
1978 plane->crtc = plane->state->crtc;
1979
1980 if (plane->old_fb)
1981 drm_framebuffer_put(plane->old_fb);
1982 }
1983 plane->old_fb = NULL;
1984 }
1985 }
1986 EXPORT_SYMBOL(drm_atomic_clean_old_fb);
1987
1988 /**
1989 * DOC: explicit fencing properties
1990 *
1991 * Explicit fencing allows userspace to control the buffer synchronization
1992 * between devices. A fence or a group of fences is transferred to/from
1993 * userspace using Sync File fds and there are two DRM properties for that:
1994 * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
1995 * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
1996 *
1997 * As a contrast, with implicit fencing the kernel keeps track of any
1998 * ongoing rendering, and automatically ensures that the atomic update waits
1999 * for any pending rendering to complete. For shared buffers represented with
2000 * a &struct dma_buf this is tracked in &struct reservation_object.
2001 * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
2002 * whereas explicit fencing is what Android wants.
2003 *
2004 * "IN_FENCE_FD”:
2005 * Use this property to pass a fence that DRM should wait on before
2006 * proceeding with the Atomic Commit request and show the framebuffer for
2007 * the plane on the screen. The fence can be either a normal fence or a
2008 * merged one, the sync_file framework will handle both cases and use a
2009 * fence_array if a merged fence is received. Passing -1 here means no
2010 * fences to wait on.
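*
* A minimal userspace sketch (hypothetical libdrm-based code; assumes the
* atomic request @req, the plane id and the IN_FENCE_FD property id were
* looked up beforehand, and @fence_fd is a Sync File fd):
*
*     drmModeAtomicAddProperty(req, plane_id, in_fence_prop_id, fence_fd);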
2011 *
2012 * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
2013 * it will only check if the Sync File is a valid one.
2014 *
2015 * On the driver side the fence is stored on the @fence parameter of
2016 * &struct drm_plane_state. Drivers which also support implicit fencing
2017 * should set the implicit fence using drm_atomic_set_fence_for_plane(),
2018 * to make sure there's consistent behaviour between drivers in precedence
2019 * of implicit vs. explicit fencing.
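*
* A minimal driver-side sketch (hypothetical ->prepare_fb code; @resv stands
* for the reservation object of the framebuffer's backing storage and is one
* possible place to look up the implicit fence):
*
*     struct dma_fence *fence = reservation_object_get_excl_rcu(resv);
*
*     drm_atomic_set_fence_for_plane(plane_state, fence);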
2020 *
2021 * "OUT_FENCE_PTR”:
2022 * Use this property to pass a file descriptor pointer to DRM. Once the
2023 * Atomic Commit request call returns OUT_FENCE_PTR will be filled with
2024 * the file descriptor number of a Sync File. This Sync File contains the
2025 * CRTC fence that will be signaled when all framebuffers present on the
2026 * Atomic Commit request for that given CRTC are scanned out on the
2027 * screen.
2028 *
2029 * The Atomic Commit request fails if an invalid pointer is passed. If the
2030 * Atomic Commit request fails for any other reason the out fence fd
2031 * returned will be -1. On an Atomic Commit with the
2032 * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
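*
* A minimal userspace sketch (hypothetical libdrm-based code; assumes the
* atomic request @req, the CRTC id and the OUT_FENCE_PTR property id were
* looked up beforehand):
*
*     int32_t out_fence_fd = -1;
*
*     drmModeAtomicAddProperty(req, crtc_id, out_fence_prop_id,
*                              (uint64_t)(uintptr_t)&out_fence_fd);
*     ret = drmModeAtomicCommit(fd, req, 0, NULL);
*     ... on success, out_fence_fd now holds the Sync File fd ...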
2033 *
2034 * Note that out-fences don't have a special interface to drivers and are
2035 * internally represented by a &struct drm_pending_vblank_event in struct
2036 * &drm_crtc_state, which is also used by the nonblocking atomic commit
2037 * helpers and for the DRM event handling for existing userspace.
2038 */
2039
2040 struct drm_out_fence_state {
2041 s32 __user *out_fence_ptr;
2042 struct sync_file *sync_file;
2043 int fd;
2044 };
2045
2046 static int setup_out_fence(struct drm_out_fence_state *fence_state,
2047 struct dma_fence *fence)
2048 {
2049 fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
2050 if (fence_state->fd < 0)
2051 return fence_state->fd;
2052
2053 if (put_user(fence_state->fd, fence_state->out_fence_ptr))
2054 return -EFAULT;
2055
2056 fence_state->sync_file = sync_file_create(fence);
2057 if (!fence_state->sync_file)
2058 return -ENOMEM;
2059
2060 return 0;
2061 }
2062
2063 static int prepare_crtc_signaling(struct drm_device *dev,
2064 struct drm_atomic_state *state,
2065 struct drm_mode_atomic *arg,
2066 struct drm_file *file_priv,
2067 struct drm_out_fence_state **fence_state,
2068 unsigned int *num_fences)
2069 {
2070 struct drm_crtc *crtc;
2071 struct drm_crtc_state *crtc_state;
2072 int i, c = 0, ret;
2073
2074 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
2075 return 0;
2076
2077 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
2078 s32 __user *fence_ptr;
2079
2080 fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
2081
2082 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
2083 struct drm_pending_vblank_event *e;
2084
2085 e = create_vblank_event(dev, arg->user_data);
2086 if (!e)
2087 return -ENOMEM;
2088
2089 crtc_state->event = e;
2090 }
2091
2092 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
2093 struct drm_pending_vblank_event *e = crtc_state->event;
2094
2095 if (!file_priv)
2096 continue;
2097
2098 ret = drm_event_reserve_init(dev, file_priv, &e->base,
2099 &e->event.base);
2100 if (ret) {
2101 kfree(e);
2102 crtc_state->event = NULL;
2103 return ret;
2104 }
2105 }
2106
2107 if (fence_ptr) {
2108 struct dma_fence *fence;
2109 struct drm_out_fence_state *f;
2110
2111 f = krealloc(*fence_state, sizeof(**fence_state) *
2112 (*num_fences + 1), GFP_KERNEL);
2113 if (!f)
2114 return -ENOMEM;
2115
2116 memset(&f[*num_fences], 0, sizeof(*f));
2117
2118 f[*num_fences].out_fence_ptr = fence_ptr;
2119 *fence_state = f;
2120
2121 fence = drm_crtc_create_fence(crtc);
2122 if (!fence)
2123 return -ENOMEM;
2124
2125 ret = setup_out_fence(&f[(*num_fences)++], fence);
2126 if (ret) {
2127 dma_fence_put(fence);
2128 return ret;
2129 }
2130
2131 crtc_state->event->base.fence = fence;
2132 }
2133
2134 c++;
2135 }
2136
2137 /*
2138 * Having this flag means user mode expects an event that will never
2139 * arrive, since there is no CRTC that could signal it.
2140 */
2141 if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
2142 return -EINVAL;
2143
2144 return 0;
2145 }
2146
2147 static void complete_crtc_signaling(struct drm_device *dev,
2148 struct drm_atomic_state *state,
2149 struct drm_out_fence_state *fence_state,
2150 unsigned int num_fences,
2151 bool install_fds)
2152 {
2153 struct drm_crtc *crtc;
2154 struct drm_crtc_state *crtc_state;
2155 int i;
2156
2157 if (install_fds) {
2158 for (i = 0; i < num_fences; i++)
2159 fd_install(fence_state[i].fd,
2160 fence_state[i].sync_file->file);
2161
2162 kfree(fence_state);
2163 return;
2164 }
2165
2166 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
2167 struct drm_pending_vblank_event *event = crtc_state->event;
2168 /*
2169 * Free the allocated event. drm_atomic_helper_setup_commit
2170 * can allocate an event too, so only free it if it's ours
2171 * to prevent a double free in drm_atomic_state_clear.
2172 */
2173 if (event && (event->base.fence || event->base.file_priv)) {
2174 drm_event_cancel_free(dev, &event->base);
2175 crtc_state->event = NULL;
2176 }
2177 }
2178
2179 if (!fence_state)
2180 return;
2181
2182 for (i = 0; i < num_fences; i++) {
2183 if (fence_state[i].sync_file)
2184 fput(fence_state[i].sync_file->file);
2185 if (fence_state[i].fd >= 0)
2186 put_unused_fd(fence_state[i].fd);
2187
2188 /* If this fails log error to the user */
2189 if (fence_state[i].out_fence_ptr &&
2190 put_user(-1, fence_state[i].out_fence_ptr))
2191 DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
2192 }
2193
2194 kfree(fence_state);
2195 }
2196
2197 int drm_mode_atomic_ioctl(struct drm_device *dev,
2198 void *data, struct drm_file *file_priv)
2199 {
2200 struct drm_mode_atomic *arg = data;
2201 uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
2202 uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
2203 uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
2204 uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
2205 unsigned int copied_objs, copied_props;
2206 struct drm_atomic_state *state;
2207 struct drm_modeset_acquire_ctx ctx;
2208 struct drm_plane *plane;
2209 struct drm_out_fence_state *fence_state;
2210 unsigned plane_mask;
2211 int ret = 0;
2212 unsigned int i, j, num_fences;
2213
2214 /* disallow for drivers not supporting atomic: */
2215 if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
2216 return -EINVAL;
2217
2218 /* disallow for userspace that has not enabled atomic cap (even
2219 * though this may be a bit overkill, since legacy userspace
2220 * wouldn't know how to call this ioctl)
2221 */
2222 if (!file_priv->atomic)
2223 return -EINVAL;
2224
2225 if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
2226 return -EINVAL;
2227
2228 if (arg->reserved)
2229 return -EINVAL;
2230
2231 if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
2232 !dev->mode_config.async_page_flip)
2233 return -EINVAL;
2234
2235 /* can't test and expect an event at the same time. */
2236 if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
2237 (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
2238 return -EINVAL;
2239
2240 drm_modeset_acquire_init(&ctx, 0);
2241
2242 state = drm_atomic_state_alloc(dev);
2243 if (!state)
2244 return -ENOMEM;
2245
2246 state->acquire_ctx = &ctx;
2247 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
2248
2249 retry:
2250 plane_mask = 0;
2251 copied_objs = 0;
2252 copied_props = 0;
2253 fence_state = NULL;
2254 num_fences = 0;
2255
2256 for (i = 0; i < arg->count_objs; i++) {
2257 uint32_t obj_id, count_props;
2258 struct drm_mode_object *obj;
2259
2260 if (get_user(obj_id, objs_ptr + copied_objs)) {
2261 ret = -EFAULT;
2262 goto out;
2263 }
2264
2265 obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
2266 if (!obj) {
2267 ret = -ENOENT;
2268 goto out;
2269 }
2270
2271 if (!obj->properties) {
2272 drm_mode_object_put(obj);
2273 ret = -ENOENT;
2274 goto out;
2275 }
2276
2277 if (get_user(count_props, count_props_ptr + copied_objs)) {
2278 drm_mode_object_put(obj);
2279 ret = -EFAULT;
2280 goto out;
2281 }
2282
2283 copied_objs++;
2284
2285 for (j = 0; j < count_props; j++) {
2286 uint32_t prop_id;
2287 uint64_t prop_value;
2288 struct drm_property *prop;
2289
2290 if (get_user(prop_id, props_ptr + copied_props)) {
2291 drm_mode_object_put(obj);
2292 ret = -EFAULT;
2293 goto out;
2294 }
2295
2296 prop = drm_mode_obj_find_prop_id(obj, prop_id);
2297 if (!prop) {
2298 drm_mode_object_put(obj);
2299 ret = -ENOENT;
2300 goto out;
2301 }
2302
2303 if (copy_from_user(&prop_value,
2304 prop_values_ptr + copied_props,
2305 sizeof(prop_value))) {
2306 drm_mode_object_put(obj);
2307 ret = -EFAULT;
2308 goto out;
2309 }
2310
2311 ret = drm_atomic_set_property(state, obj, prop,
2312 prop_value);
2313 if (ret) {
2314 drm_mode_object_put(obj);
2315 goto out;
2316 }
2317
2318 copied_props++;
2319 }
2320
2321 if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
2322 !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
2323 plane = obj_to_plane(obj);
2324 plane_mask |= (1 << drm_plane_index(plane));
2325 plane->old_fb = plane->fb;
2326 }
2327 drm_mode_object_put(obj);
2328 }
2329
2330 ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
2331 &num_fences);
2332 if (ret)
2333 goto out;
2334
2335 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
2336 ret = drm_atomic_check_only(state);
2337 } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
2338 ret = drm_atomic_nonblocking_commit(state);
2339 } else {
2340 if (unlikely(drm_debug & DRM_UT_STATE))
2341 drm_atomic_print_state(state);
2342
2343 ret = drm_atomic_commit(state);
2344 }
2345
2346 out:
2347 drm_atomic_clean_old_fb(dev, plane_mask, ret);
2348
2349 complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);
2350
2351 if (ret == -EDEADLK) {
2352 drm_atomic_state_clear(state);
2353 drm_modeset_backoff(&ctx);
2354 goto retry;
2355 }
2356
2357 drm_atomic_state_put(state);
2358
2359 drm_modeset_drop_locks(&ctx);
2360 drm_modeset_acquire_fini(&ctx);
2361
2362 return ret;
2363 }