#define to_dm_connector_state(x)\
container_of((x), struct dm_connector_state, base)
+/* Return true when the new CRTC state requires a full mode-set: DRM must
+ * flag a modeset AND the CRTC must end up both enabled and active. The
+ * disable/teardown direction is covered by modereset_required().
+ */
+static bool modeset_required(struct drm_crtc_state *crtc_state)
+{
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return false;
+
+ if (!crtc_state->enable)
+ return false;
+
+ return crtc_state->active;
+}
+
+/* Return true when the new CRTC state requires the mode to be torn down:
+ * DRM flags a modeset and the CRTC ends up disabled or inactive.
+ * Counterpart of modeset_required().
+ */
+static bool modereset_required(struct drm_crtc_state *crtc_state)
+{
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return false;
+
+ return !crtc_state->enable || !crtc_state->active;
+}
void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
struct drm_crtc *crtc = &amdgpu_crtc->base;
int x, y;
int xorigin = 0, yorigin = 0;
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
position.y_hotspot = yorigin;
if (!dc_stream_set_cursor_attributes(
- amdgpu_crtc->stream,
+ acrtc_state->stream,
&attributes)) {
DRM_ERROR("DC failed to set cursor attributes\n");
}
if (!dc_stream_set_cursor_position(
- amdgpu_crtc->stream,
+ acrtc_state->stream,
&position)) {
DRM_ERROR("DC failed to set cursor position\n");
}
}
-static int dm_crtc_unpin_cursor_bo_old(
- struct amdgpu_crtc *amdgpu_crtc)
-{
- struct amdgpu_bo *robj;
- int ret = 0;
-
- if (NULL != amdgpu_crtc && NULL != amdgpu_crtc->cursor_bo) {
- robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-
- ret = amdgpu_bo_reserve(robj, false);
-
- if (likely(ret == 0)) {
- ret = amdgpu_bo_unpin(robj);
-
- if (unlikely(ret != 0)) {
- DRM_ERROR(
- "%s: unpin failed (ret=%d), bo %p\n",
- __func__,
- ret,
- amdgpu_crtc->cursor_bo);
- }
-
- amdgpu_bo_unreserve(robj);
- } else {
- DRM_ERROR(
- "%s: reserve failed (ret=%d), bo %p\n",
- __func__,
- ret,
- amdgpu_crtc->cursor_bo);
- }
-
- drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
- amdgpu_crtc->cursor_bo = NULL;
- }
-
- return ret;
-}
-
-static int dm_crtc_pin_cursor_bo_new(
- struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- struct amdgpu_bo **ret_obj)
-{
- struct amdgpu_crtc *amdgpu_crtc;
- struct amdgpu_bo *robj;
- struct drm_gem_object *obj;
- int ret = -EINVAL;
-
- if (NULL != crtc) {
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
- uint64_t gpu_addr;
-
- amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- obj = drm_gem_object_lookup(file_priv, handle);
-
- if (!obj) {
- DRM_ERROR(
- "Cannot find cursor object %x for crtc %d\n",
- handle,
- amdgpu_crtc->crtc_id);
- goto release;
- }
- robj = gem_to_amdgpu_bo(obj);
-
- ret = amdgpu_bo_reserve(robj, false);
-
- if (unlikely(ret != 0)) {
- drm_gem_object_unreference_unlocked(obj);
- DRM_ERROR("dm_crtc_pin_cursor_bo_new ret %x, handle %x\n",
- ret, handle);
- goto release;
- }
-
- ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, 0,
- adev->mc.visible_vram_size,
- &gpu_addr);
-
- if (ret == 0) {
- amdgpu_crtc->cursor_addr = gpu_addr;
- *ret_obj = robj;
- }
- amdgpu_bo_unreserve(robj);
- if (ret)
- drm_gem_object_unreference_unlocked(obj);
-
- }
-release:
-
- return ret;
-}
-
static int dm_crtc_cursor_set(
struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
+ uint64_t address,
uint32_t width,
uint32_t height)
{
- struct amdgpu_bo *new_cursor_bo;
struct dc_cursor_position position;
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
int ret;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
ret = EINVAL;
- new_cursor_bo = NULL;
DRM_DEBUG_KMS(
- "%s: crtc_id=%d with handle %d and size %d to %d, bo_object %p\n",
+ "%s: crtc_id=%d with size %d to %d \n",
__func__,
amdgpu_crtc->crtc_id,
- handle,
width,
- height,
- amdgpu_crtc->cursor_bo);
+ height);
- if (!handle) {
+ if (!address) {
/* turn off cursor */
position.enable = false;
position.x = 0;
position.y = 0;
- if (amdgpu_crtc->stream) {
+ if (acrtc_state->stream) {
/*set cursor visible false*/
dc_stream_set_cursor_position(
- amdgpu_crtc->stream,
+ acrtc_state->stream,
&position);
}
- /*unpin old cursor buffer and update cache*/
- ret = dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);
goto release;
}
height);
goto release;
}
- /*try to pin new cursor bo*/
- ret = dm_crtc_pin_cursor_bo_new(crtc, file_priv, handle, &new_cursor_bo);
- /*if map not successful then return an error*/
- if (ret)
- goto release;
/*program new cursor bo to hardware*/
- dm_set_cursor(amdgpu_crtc, amdgpu_crtc->cursor_addr, width, height);
-
- /*un map old, not used anymore cursor bo ,
- * return memory and mapping back */
- dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);
-
- /*assign new cursor bo to our internal cache*/
- amdgpu_crtc->cursor_bo = &new_cursor_bo->gem_base;
+ dm_set_cursor(amdgpu_crtc, address, width, height);
release:
return ret;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
int xorigin = 0, yorigin = 0;
struct dc_cursor_position position;
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
amdgpu_crtc->cursor_x = x;
amdgpu_crtc->cursor_y = y;
position.x_hotspot = xorigin;
position.y_hotspot = yorigin;
- if (amdgpu_crtc->stream) {
+ if (acrtc_state->stream) {
if (!dc_stream_set_cursor_position(
- amdgpu_crtc->stream,
+ acrtc_state->stream,
&position)) {
DRM_ERROR("DC failed to set cursor position\n");
return -EINVAL;
return 0;
}
-static void dm_crtc_cursor_reset(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- DRM_DEBUG_KMS(
- "%s: with cursor_bo %p\n",
- __func__,
- amdgpu_crtc->cursor_bo);
-
- if (amdgpu_crtc->cursor_bo && amdgpu_crtc->stream) {
- dm_set_cursor(
- amdgpu_crtc,
- amdgpu_crtc->cursor_addr,
- amdgpu_crtc->cursor_width,
- amdgpu_crtc->cursor_height);
- }
-}
static bool fill_rects_from_plane_state(
const struct drm_plane_state *state,
struct dc_surface *surface)
return true;
}
-static bool get_fb_info(
+static int get_fb_info(
const struct amdgpu_framebuffer *amdgpu_fb,
uint64_t *tiling_flags,
uint64_t *fb_location)
{
struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
int r = amdgpu_bo_reserve(rbo, false);
- if (unlikely(r != 0)){
+ if (unlikely(r)) {
DRM_ERROR("Unable to reserve buffer\n");
- return false;
+ return r;
}
if (fb_location)
amdgpu_bo_unreserve(rbo);
- return true;
+ return r;
}
-static void fill_plane_attributes_from_fb(
+
+static int fill_plane_attributes_from_fb(
struct amdgpu_device *adev,
struct dc_surface *surface,
const struct amdgpu_framebuffer *amdgpu_fb, bool addReq)
{
uint64_t tiling_flags;
uint64_t fb_location = 0;
+ unsigned int awidth;
const struct drm_framebuffer *fb = &amdgpu_fb->base;
+ int ret = 0;
struct drm_format_name_buf format_name;
- get_fb_info(
+ ret = get_fb_info(
amdgpu_fb,
&tiling_flags,
addReq == true ? &fb_location:NULL);
+ if (ret)
+ return ret;
switch (fb->format->format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_ABGR2101010:
surface->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
break;
- case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_NV21:
surface->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
break;
- case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
surface->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
break;
default:
DRM_ERROR("Unsupported screen format %s\n",
drm_get_format_name(fb->format->format, &format_name));
- return;
+ return -EINVAL;
}
if (surface->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
surface->color_space = COLOR_SPACE_SRGB;
} else {
+ awidth = ALIGN(fb->width, 64);
surface->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
surface->address.video_progressive.luma_addr.low_part
= lower_32_bits(fb_location);
surface->address.video_progressive.chroma_addr.low_part
= lower_32_bits(fb_location) +
- (fb->width * fb->height);
+ (awidth * fb->height);
surface->plane_size.video.luma_size.x = 0;
surface->plane_size.video.luma_size.y = 0;
- surface->plane_size.video.luma_size.width = fb->width;
+ surface->plane_size.video.luma_size.width = awidth;
surface->plane_size.video.luma_size.height = fb->height;
/* TODO: unhardcode */
- surface->plane_size.video.luma_pitch = ALIGN(fb->width, 64);
+ surface->plane_size.video.luma_pitch = awidth;
surface->plane_size.video.chroma_size.x = 0;
surface->plane_size.video.chroma_size.y = 0;
- surface->plane_size.video.chroma_size.width = fb->width / 2;
- surface->plane_size.video.chroma_size.height = fb->height / 2;
- surface->plane_size.video.chroma_pitch = ALIGN(fb->width, 64) / 2;
+ surface->plane_size.video.chroma_size.width = awidth;
+ surface->plane_size.video.chroma_size.height = fb->height;
+ surface->plane_size.video.chroma_pitch = awidth / 2;
/* TODO: unhardcode */
surface->color_space = COLOR_SPACE_YCBCR709;
surface->scaling_quality.v_taps = 0;
surface->stereo_format = PLANE_STEREO_FORMAT_NONE;
+ return ret;
+
}
#define NUM_OF_RAW_GAMMA_RAMP_RGB_256 256
-static void fill_gamma_from_crtc(
- const struct drm_crtc *crtc,
+static void fill_gamma_from_crtc_state(
+ const struct drm_crtc_state *crtc_state,
struct dc_surface *dc_surface)
{
int i;
struct dc_gamma *gamma;
- struct drm_crtc_state *state = crtc->state;
- struct drm_color_lut *lut = (struct drm_color_lut *) state->gamma_lut->data;
+ struct drm_color_lut *lut = (struct drm_color_lut *) crtc_state->gamma_lut->data;
gamma = dc_create_gamma();
- if (gamma == NULL)
+ if (gamma == NULL) {
+ WARN_ON(1);
return;
+ }
for (i = 0; i < NUM_OF_RAW_GAMMA_RAMP_RGB_256; i++) {
gamma->red[i] = lut[i].red;
dc_surface->gamma_correction = gamma;
}
-static void fill_plane_attributes(
+static int fill_plane_attributes(
struct amdgpu_device *adev,
struct dc_surface *surface,
- struct drm_plane_state *state, bool addrReq)
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state,
+ bool addrReq)
{
const struct amdgpu_framebuffer *amdgpu_fb =
- to_amdgpu_framebuffer(state->fb);
- const struct drm_crtc *crtc = state->crtc;
+ to_amdgpu_framebuffer(plane_state->fb);
+ const struct drm_crtc *crtc = plane_state->crtc;
struct dc_transfer_func *input_tf;
+ int ret = 0;
+
+ if (!fill_rects_from_plane_state(plane_state, surface))
+ return -EINVAL;
- fill_rects_from_plane_state(state, surface);
- fill_plane_attributes_from_fb(
+ ret = fill_plane_attributes_from_fb(
crtc->dev->dev_private,
surface,
amdgpu_fb,
addrReq);
+ if (ret)
+ return ret;
+
input_tf = dc_create_transfer_func();
if (input_tf == NULL)
- return;
+ return -ENOMEM;
input_tf->type = TF_TYPE_PREDEFINED;
input_tf->tf = TRANSFER_FUNCTION_SRGB;
surface->in_transfer_func = input_tf;
/* In case of gamma set, update gamma value */
- if (state->crtc->state->gamma_lut) {
- fill_gamma_from_crtc(crtc, surface);
- }
+ if (crtc_state->gamma_lut)
+ fill_gamma_from_crtc_state(crtc_state, surface);
+
+ return ret;
}
/*****************************************************************************/
static void update_stream_scaling_settings(
const struct drm_display_mode *mode,
const struct dm_connector_state *dm_state,
- const struct dc_stream *stream)
+ struct dc_stream *stream)
{
- struct amdgpu_device *adev = dm_state->base.crtc->dev->dev_private;
enum amdgpu_rmx_type rmx_type;
struct rect src = { 0 }; /* viewport in composition space*/
dst.height -= dm_state->underscan_vborder;
}
- adev->dm.dc->stream_funcs.stream_update_scaling(adev->dm.dc, stream, &src, &dst);
+ stream->src = src;
+ stream->dst = dst;
DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
dst.x, dst.y, dst.width, dst.height);
}
-static void add_surface(struct dc *dc,
- struct drm_crtc *crtc,
- struct drm_plane *plane,
- const struct dc_surface **dc_surfaces)
-{
- struct dc_surface *dc_surface;
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- const struct dc_stream *dc_stream = acrtc->stream;
- unsigned long flags;
-
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- if (acrtc->pflip_status != AMDGPU_FLIP_NONE) {
- DRM_ERROR("add_surface: acrtc %d, already busy\n",
- acrtc->crtc_id);
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- /* In comit tail framework this cannot happen */
- BUG_ON(0);
- }
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-
- if (!dc_stream) {
- dm_error(
- "%s: Failed to obtain stream on crtc (%d)!\n",
- __func__,
- acrtc->crtc_id);
- goto fail;
- }
-
- dc_surface = dc_create_surface(dc);
-
- if (!dc_surface) {
- dm_error(
- "%s: Failed to create a surface!\n",
- __func__);
- goto fail;
- }
-
- /* Surface programming */
- fill_plane_attributes(
- crtc->dev->dev_private,
- dc_surface,
- plane->state,
- true);
-
- *dc_surfaces = dc_surface;
-
-fail:
- return;
-}
-
static enum dc_color_depth convert_color_depth_from_display_info(
const struct drm_connector *connector)
{
kfree(crtc);
}
+/* Free a driver-private CRTC state: drop our dc_stream reference, tear
+ * down the common drm_crtc_state contents, then free the container.
+ */
+static void dm_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dm_crtc_state *cur = to_dm_crtc_state(state);
+
+ /* TODO Destroy dc_stream objects here once the stream object is flattened */
+ if (cur->stream)
+ dc_stream_release(cur->stream);
+
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+
+ kfree(state);
+}
+
+/* Reset the CRTC to a fresh, zeroed driver-private state (e.g. on driver
+ * load). Destroys any existing state first.
+ */
+static void dm_crtc_reset_state(struct drm_crtc *crtc)
+{
+ struct dm_crtc_state *state;
+
+ if (crtc->state)
+ dm_crtc_destroy_state(crtc, crtc->state);
+
+ /* Clear the stale pointer now: if the allocation below fails we must
+  * not leave crtc->state pointing at memory we just freed.
+  */
+ crtc->state = NULL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (WARN_ON(!state))
+ return;
+
+ crtc->state = &state->base;
+ crtc->state->crtc = crtc;
+
+}
+
+/* Duplicate the current CRTC state for an atomic check/commit. Takes an
+ * extra reference on the attached dc_stream so both old and new states
+ * can release it independently. Returns NULL on missing state or OOM.
+ */
+static struct drm_crtc_state *
+dm_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct dm_crtc_state *state, *cur;
+
+ /* Check crtc->state before to_dm_crtc_state() dereferences it */
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ cur = to_dm_crtc_state(crtc->state);
+
+ /* NOTE(review): assumes dm_alloc() zero-initializes, so state->stream
+  * starts NULL when cur->stream is unset — confirm against dm_alloc.
+  */
+ state = dm_alloc(sizeof(*state));
+ if (WARN_ON(!state))
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+ if (cur->stream) {
+ state->stream = cur->stream;
+ dc_stream_retain(state->stream);
+ }
+
+ /* TODO Duplicate dc_stream once the stream object is flattened */
+
+ return &state->base;
+}
+
/* Implemented only the options currently availible for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
- .reset = drm_atomic_helper_crtc_reset,
- .cursor_set = dm_crtc_cursor_set,
- .cursor_move = dm_crtc_cursor_move,
+ .reset = dm_crtc_reset_state,
.destroy = amdgpu_dm_crtc_destroy,
.gamma_set = drm_atomic_helper_legacy_gamma_set,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .atomic_duplicate_state = dm_crtc_duplicate_state,
+ .atomic_destroy_state = dm_crtc_destroy_state,
};
static enum drm_connector_status
struct dm_connector_state *dm_new_state =
to_dm_connector_state(connector_state);
- struct drm_crtc_state *new_crtc_state;
- struct drm_crtc *crtc;
- int i;
int ret = -EINVAL;
if (property == dev->mode_config.scaling_mode_property) {
ret = 0;
}
- for_each_crtc_in_state(
- connector_state->state,
- crtc,
- new_crtc_state,
- i) {
-
- if (crtc == connector_state->crtc) {
- struct drm_plane_state *plane_state;
-
- /*
- * Bit of magic done here. We need to ensure
- * that planes get update after mode is set.
- * So, we need to add primary plane to state,
- * and this way atomic_update would be called
- * for it
- */
- plane_state =
- drm_atomic_get_plane_state(
- connector_state->state,
- crtc->primary);
-
- if (!plane_state)
- return -EINVAL;
- }
- }
-
return ret;
}
struct drm_display_mode *mode)
{
int result = MODE_ERROR;
- const struct dc_sink *dc_sink;
+ struct dc_sink *dc_sink;
struct amdgpu_device *adev = connector->dev->dev_private;
- struct dc_validation_set val_set = { 0 };
/* TODO: Unhardcode stream count */
struct dc_stream *stream;
struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
- struct validate_context *context;
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(mode->flags & DRM_MODE_FLAG_DBLSCAN))
if (NULL == dc_sink) {
DRM_ERROR("dc_sink is NULL!\n");
- goto null_sink;
+ goto fail;
}
stream = dc_create_stream_for_sink(dc_sink);
if (NULL == stream) {
DRM_ERROR("Failed to create stream for sink!\n");
- goto stream_create_fail;
+ goto fail;
}
drm_mode_set_crtcinfo(mode, 0);
fill_stream_properties_from_drm_display_mode(stream, mode, connector);
- val_set.stream = stream;
- val_set.surface_count = 0;
stream->src.width = mode->hdisplay;
stream->src.height = mode->vdisplay;
stream->dst = stream->src;
- context = dc_get_validate_context(adev->dm.dc, &val_set, 1);
-
- if (context) {
+ if (dc_validate_stream(adev->dm.dc, stream))
result = MODE_OK;
- dc_resource_validate_ctx_destruct(context);
- dm_free(context);
- }
dc_stream_release(stream);
-stream_create_fail:
-null_sink:
+fail:
/* TODO: error handling*/
return result;
}
struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- return 0;
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct dc *dc = adev->dm.dc;
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
+ int ret = -EINVAL;
+
+ if (unlikely(!dm_crtc_state->stream && modeset_required(state))) {
+ WARN_ON(1);
+ return ret;
+ }
+
+ /* In some use cases, like reset, no stream is attached */
+ if (!dm_crtc_state->stream)
+ return 0;
+
+ if (dc_validate_stream(dc, dm_crtc_state->stream))
+ return 0;
+
+ return ret;
}
static bool dm_crtc_helper_mode_fixup(
static void dm_drm_plane_reset(struct drm_plane *plane)
{
- struct amdgpu_drm_plane_state *amdgpu_state;
+ struct dm_plane_state *amdgpu_state = NULL;
- if (plane->state) {
- amdgpu_state = to_amdgpu_plane_state(plane->state);
- if (amdgpu_state->base.fb)
- drm_framebuffer_unreference(amdgpu_state->base.fb);
- kfree(amdgpu_state);
- plane->state = NULL;
- }
+ if (plane->state)
+ plane->funcs->atomic_destroy_state(plane, plane->state);
amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
+
if (amdgpu_state) {
plane->state = &amdgpu_state->base;
plane->state->plane = plane;
+ plane->state->rotation = DRM_MODE_ROTATE_0;
}
+ else
+ WARN_ON(1);
}
static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
- struct amdgpu_drm_plane_state *amdgpu_state;
- struct amdgpu_drm_plane_state *copy;
+ struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
- amdgpu_state = to_amdgpu_plane_state(plane->state);
- copy = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
- if (!copy)
+ old_dm_plane_state = to_dm_plane_state(plane->state);
+ dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
+ if (!dm_plane_state)
return NULL;
- __drm_atomic_helper_plane_duplicate_state(plane, ©->base);
- return ©->base;
+ __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
+
+ if (old_dm_plane_state->surface) {
+ dm_plane_state->surface = old_dm_plane_state->surface;
+ dc_surface_retain(dm_plane_state->surface);
+ }
+
+ return &dm_plane_state->base;
}
-static void dm_drm_plane_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+void dm_drm_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
- struct amdgpu_drm_plane_state *old_amdgpu_state =
- to_amdgpu_plane_state(old_state);
- __drm_atomic_helper_plane_destroy_state(old_state);
- kfree(old_amdgpu_state);
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+
+ if (dm_plane_state->surface)
+ dc_surface_release(dm_plane_state->surface);
+
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(dm_plane_state);
}
static const struct drm_plane_funcs dm_plane_funcs = {
struct drm_gem_object *obj;
struct amdgpu_bo *rbo;
int r;
+ struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
+ unsigned int awidth;
+
+ dm_plane_state_old = to_dm_plane_state(plane->state);
+ dm_plane_state_new = to_dm_plane_state(new_state);
if (!new_state->fb) {
DRM_DEBUG_KMS("No FB bound\n");
r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);
+
amdgpu_bo_unreserve(rbo);
if (unlikely(r != 0)) {
}
amdgpu_bo_ref(rbo);
+
+ if (dm_plane_state_new->surface &&
+ dm_plane_state_old->surface != dm_plane_state_new->surface) {
+ struct dc_surface *surface = dm_plane_state_new->surface;
+
+ if (surface->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ surface->address.grph.addr.low_part = lower_32_bits(afb->address);
+ surface->address.grph.addr.high_part = upper_32_bits(afb->address);
+ } else {
+ awidth = ALIGN(new_state->fb->width, 64);
+ surface->address.video_progressive.luma_addr.low_part
+ = lower_32_bits(afb->address);
+ surface->address.video_progressive.chroma_addr.low_part
+ = lower_32_bits(afb->address) +
+ (awidth * new_state->fb->height);
+ }
+ }
+
+ /* This is a hack for S3: the 4.9 kernel filters the cursor buffer out
+ * of prepare and cleanup in drm_atomic_helper_prepare_planes
+ * and drm_atomic_helper_cleanup_planes because the fb doesn't exist in S3.
+ * In 4.10 kernels this code should be removed, and amdgpu_device_suspend
+ * code touching frame buffers should be avoided for DC.
+ */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);
+
+ acrtc->cursor_bo = obj;
+ }
return 0;
}
struct drm_display_mode *mode, struct dc_validation_set *val_set)
{
int result = MODE_ERROR;
- const struct dc_sink *dc_sink =
+ struct dc_sink *dc_sink =
to_amdgpu_connector(connector)->dc_sink;
/* TODO: Unhardcode stream count */
struct dc_stream *stream;
* check will succeed, and let DC to implement proper check
*/
static uint32_t rgb_formats[] = {
- DRM_FORMAT_XRGB4444,
- DRM_FORMAT_ARGB4444,
- DRM_FORMAT_RGBA4444,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
};
static uint32_t yuv_formats[] = {
- DRM_FORMAT_YUV420,
- DRM_FORMAT_YVU420,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+};
+
+static const u32 cursor_formats[] = {
+ DRM_FORMAT_ARGB8888
};
int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
{
int res = -EPERM;
- switch (aplane->plane_type) {
+ switch (aplane->base.type) {
case DRM_PLANE_TYPE_PRIMARY:
aplane->base.format_default = true;
&dm_plane_funcs,
rgb_formats,
ARRAY_SIZE(rgb_formats),
- NULL, aplane->plane_type, NULL);
+ NULL, aplane->base.type, NULL);
break;
case DRM_PLANE_TYPE_OVERLAY:
res = drm_universal_plane_init(
&dm_plane_funcs,
yuv_formats,
ARRAY_SIZE(yuv_formats),
- NULL, aplane->plane_type, NULL);
+ NULL, aplane->base.type, NULL);
break;
case DRM_PLANE_TYPE_CURSOR:
- DRM_ERROR("KMS: Cursor plane not implemented.");
+ res = drm_universal_plane_init(
+ dm->adev->ddev,
+ &aplane->base,
+ possible_crtcs,
+ &dm_plane_funcs,
+ cursor_formats,
+ ARRAY_SIZE(cursor_formats),
+ NULL, aplane->base.type, NULL);
break;
}
struct drm_plane *plane,
uint32_t crtc_index)
{
- struct amdgpu_crtc *acrtc;
+ struct amdgpu_crtc *acrtc = NULL;
+ struct amdgpu_plane *cursor_plane;
+
int res = -ENOMEM;
+ cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
+ if (!cursor_plane)
+ goto fail;
+
+ cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
+ res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
+
acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
if (!acrtc)
goto fail;
dm->ddev,
&acrtc->base,
plane,
- NULL,
+ &cursor_plane->base,
&amdgpu_dm_crtc_funcs, NULL);
if (res)
drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
return 0;
+
fail:
- kfree(acrtc);
+ if (acrtc)
+ kfree(acrtc);
+ if (cursor_plane)
+ kfree(cursor_plane);
acrtc->crtc_id = -1;
return res;
}
+
static int to_drm_connector_type(enum signal_type st)
{
switch (st) {
struct amdgpu_display_manager *dm,
struct amdgpu_connector *aconnector,
int connector_type,
- const struct dc_link *link,
+ struct dc_link *link,
int link_index)
{
struct amdgpu_device *adev = dm->ddev->dev_private;
int res = 0;
int connector_type;
struct dc *dc = dm->dc;
- const struct dc_link *link = dc_get_link_at_index(dc, link_index);
+ struct dc_link *link = dc_get_link_at_index(dc, link_index);
struct amdgpu_i2c_adapter *i2c;
((struct dc_link *)link)->priv = aconnector;
return res;
}
-enum dm_commit_action {
- DM_COMMIT_ACTION_NOTHING,
- DM_COMMIT_ACTION_RESET,
- DM_COMMIT_ACTION_DPMS_ON,
- DM_COMMIT_ACTION_DPMS_OFF,
- DM_COMMIT_ACTION_SET
-};
-
-static enum dm_commit_action get_dm_commit_action(struct drm_crtc_state *state)
-{
- /* mode changed means either actually mode changed or enabled changed */
- /* active changed means dpms changed */
-
- DRM_DEBUG_KMS("crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
- state->enable,
- state->active,
- state->planes_changed,
- state->mode_changed,
- state->active_changed,
- state->connectors_changed);
-
- if (state->mode_changed) {
- /* if it is got disabled - call reset mode */
- if (!state->enable)
- return DM_COMMIT_ACTION_RESET;
-
- if (state->active)
- return DM_COMMIT_ACTION_SET;
- else
- return DM_COMMIT_ACTION_RESET;
- } else {
- /* ! mode_changed */
-
- /* if it is remain disable - skip it */
- if (!state->enable)
- return DM_COMMIT_ACTION_NOTHING;
-
- if (state->active && state->connectors_changed)
- return DM_COMMIT_ACTION_SET;
-
- if (state->active_changed) {
- if (state->active) {
- return DM_COMMIT_ACTION_DPMS_ON;
- } else {
- return DM_COMMIT_ACTION_DPMS_OFF;
- }
- } else {
- /* ! active_changed */
- return DM_COMMIT_ACTION_NOTHING;
- }
- }
-}
-
static void manage_dm_interrupts(
struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
return false;
}
-static void remove_stream(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc)
+static void remove_stream(
+ struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ struct dc_stream *stream)
{
- /*
- * we evade vblanks and pflips on crtc that
- * should be changed
- */
- manage_dm_interrupts(adev, acrtc, false);
-
/* this is the update mode case */
if (adev->dm.freesync_module)
- mod_freesync_remove_stream(adev->dm.freesync_module,
- acrtc->stream);
+ mod_freesync_remove_stream(adev->dm.freesync_module, stream);
- dc_stream_release(acrtc->stream);
- acrtc->stream = NULL;
acrtc->otg_inst = -1;
acrtc->enabled = false;
}
+/* Dispatch a cursor-plane update. Three cases:
+ *  - no fb before or after: nothing to do;
+ *  - same fb: pure cursor move;
+ *  - fb changed (set or cleared): program/disable the cursor. On disable
+ *    (new fb is NULL) the old state's crtc is used and address 0 signals
+ *    "turn cursor off" to dm_crtc_cursor_set().
+ */
+static void handle_cursor_update(
+ struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state)
+{
+ if (!plane->state->fb && !old_plane_state->fb)
+ return;
+
+ /* Check if it's a cursor on/off update or just cursor move*/
+ if (plane->state->fb == old_plane_state->fb)
+ dm_crtc_cursor_move(
+ plane->state->crtc,
+ plane->state->crtc_x,
+ plane->state->crtc_y);
+ else {
+ struct amdgpu_framebuffer *afb =
+ to_amdgpu_framebuffer(plane->state->fb);
+ dm_crtc_cursor_set(
+ (!!plane->state->fb) ?
+ plane->state->crtc :
+ old_plane_state->crtc,
+ (!!plane->state->fb) ?
+ afb->address :
+ 0,
+ plane->state->crtc_w,
+ plane->state->crtc_h);
+ }
+}
+
+
+/* Hand the pending page-flip event over to the flip IRQ handler: stash
+ * the event on the amdgpu_crtc, mark the flip SUBMITTED, and clear the
+ * event from the drm state so it is not signalled twice. Caller must
+ * hold dev->event_lock (asserted below).
+ */
+static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
+{
+
+ assert_spin_locked(&acrtc->base.dev->event_lock);
+ WARN_ON(acrtc->event);
+
+ acrtc->event = acrtc->base.state->event;
+
+ /* Set the flip status */
+ acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+
+ /* Mark this event as consumed */
+ acrtc->base.state->event = NULL;
+
+ DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
+ acrtc->crtc_id);
+}
/*
* Executes flip
struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
struct amdgpu_device *adev = crtc->dev->dev_private;
bool async_flip = (acrtc->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
+ struct dc_flip_addrs addr = { {0} };
+ struct dc_surface_update surface_updates[1] = { {0} };
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+
/* Prepare wait for target vblank early - before the fence-waits */
target_vblank = target - drm_crtc_vblank_count(crtc) +
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0)) {
DRM_ERROR("failed to reserve buffer before flip\n");
- BUG_ON(0);
+ WARN_ON(1);
}
/* Wait for all fences on this FB */
/* update crtc fb */
crtc->primary->fb = fb;
- /* Do the flip (mmio) */
- adev->mode_info.funcs->page_flip(adev, acrtc->crtc_id, afb->address, async_flip);
+ WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
+ WARN_ON(!acrtc_state->stream);
+
+ addr.address.grph.addr.low_part = lower_32_bits(afb->address);
+ addr.address.grph.addr.high_part = upper_32_bits(afb->address);
+ addr.flip_immediate = async_flip;
+
+
+ if (acrtc->base.state->event)
+ prepare_flip_isr(acrtc);
+
+ surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->surfaces[0];
+ surface_updates->flip_addr = &addr;
+
+
+ dc_update_surfaces_and_stream(adev->dm.dc, surface_updates, 1, acrtc_state->stream, NULL);
+
+ DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
+ __func__,
+ addr.address.grph.addr.high_part,
+ addr.address.grph.addr.low_part);
+
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
- acrtc->crtc_id);
}
static void amdgpu_dm_commit_surfaces(struct drm_atomic_state *state,
uint32_t i;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state;
- const struct dc_stream *dc_stream_attach;
- const struct dc_surface *dc_surfaces_constructed[MAX_SURFACES];
+ struct dc_stream *dc_stream_attach;
+ struct dc_surface *dc_surfaces_constructed[MAX_SURFACES];
+ struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(pcrtc->state);
int planes_count = 0;
+ unsigned long flags;
/* update planes when needed */
for_each_plane_in_state(state, plane, old_plane_state, i) {
struct drm_plane_state *plane_state = plane->state;
struct drm_crtc *crtc = plane_state->crtc;
- struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(crtc);
struct drm_framebuffer *fb = plane_state->fb;
- struct drm_connector *connector;
- struct dm_connector_state *dm_state = NULL;
-
- enum dm_commit_action action;
bool pflip_needed;
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
- if (!fb || !crtc || !crtc->state->active)
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ handle_cursor_update(plane, old_plane_state);
continue;
+ }
- action = get_dm_commit_action(crtc->state);
+ if (!fb || !crtc || pcrtc != crtc || !crtc->state->active ||
+ (!crtc->state->planes_changed &&
+ !pcrtc->state->color_mgmt_changed))
+ continue;
- /*
- * TODO - TO decide if it's a flip or surface update
- * stop relying on allow_modeset flag and query DC
- * using dc_check_update_surfaces_for_stream.
- */
pflip_needed = !state->allow_modeset;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
+ DRM_ERROR("add_surface: acrtc %d, already busy\n",
+ acrtc_attach->crtc_id);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ /* In commit tail framework this cannot happen */
+ WARN_ON(1);
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
if (!pflip_needed) {
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- head) {
- if (connector->state->crtc == crtc) {
- dm_state = to_dm_connector_state(
- connector->state);
- break;
- }
- }
+ WARN_ON(!dm_plane_state->surface);
+
+ dc_surfaces_constructed[planes_count] = dm_plane_state->surface;
+
+ dc_stream_attach = acrtc_state->stream;
+ planes_count++;
- /*
- * This situation happens in the following case:
- * we are about to get set mode for connector who's only
- * possible crtc (in encoder crtc mask) is used by
- * another connector, that is why it will try to
- * re-assing crtcs in order to make configuration
- * supported. For our implementation we need to make all
- * encoders support all crtcs, then this issue will
- * never arise again. But to guard code from this issue
- * check is left.
- *
- * Also it should be needed when used with actual
- * drm_atomic_commit ioctl in future
- */
- if (!dm_state)
- continue;
- if (crtc == pcrtc) {
- add_surface(dm->dc, crtc, plane,
- &dc_surfaces_constructed[planes_count]);
- dc_stream_attach = acrtc_attach->stream;
- planes_count++;
- }
} else if (crtc->state->planes_changed) {
+ /* Assume that even a single CRTC requesting an immediate flip
+ * means the entire commit can't wait for VBLANK.
+ * TODO: check whether this is correct.
+ */
*wait_for_vblank =
acrtc_attach->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
false : true;
+ /* TODO: Needs rework for multiplane flip */
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ drm_crtc_vblank_get(crtc);
+
amdgpu_dm_do_flip(
crtc,
fb,
drm_crtc_vblank_count(crtc) + *wait_for_vblank);
+ /*TODO BUG remove ASAP in 4.12 to avoid race between worker and flip IOCTL */
+
/*clean up the flags for next usage*/
acrtc_attach->flip_flags = 0;
}
}
if (planes_count) {
+ unsigned long flags;
+
+ if (pcrtc->state->event) {
+
+ drm_crtc_vblank_get(pcrtc);
+
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+ prepare_flip_isr(acrtc_attach);
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
if (false == dc_commit_surfaces_to_stream(dm->dc,
dc_surfaces_constructed,
planes_count,
- dc_stream_attach)) {
+ dc_stream_attach))
dm_error("%s: Failed to attach surface!\n", __func__);
- return;
- }
- for (i = 0; i < planes_count; i++)
- dc_surface_release(dc_surfaces_constructed[i]);
+ } else {
+ /*TODO BUG Here should go disable planes on CRTC. */
+ }
+}
+
+
+int amdgpu_dm_atomic_commit(
+ struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_state;
+ struct amdgpu_device *adev = dev->dev_private;
+ int i;
+
+ /*
+ * We evade vblanks and pflips on crtc that
+ * should be changed. We do it here to flush & disable
+ * interrupts before drm_swap_state is called in drm_atomic_helper_commit
+ * it will update crtc->dm_crtc_state->stream pointer which is used in
+ * the ISRs.
+ */
+ for_each_crtc_in_state(state, crtc, new_state, i) {
+ struct dm_crtc_state *old_acrtc_state = to_dm_crtc_state(crtc->state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ if (drm_atomic_crtc_needs_modeset(new_state) && old_acrtc_state->stream)
+ manage_dm_interrupts(adev, acrtc, false);
}
+
+ return drm_atomic_helper_commit(dev, state, nonblock);
+
+ /*TODO Handle EINTR, reenable IRQ*/
}
void amdgpu_dm_atomic_commit_tail(
struct drm_device *dev = state->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
uint32_t i, j;
- uint32_t commit_streams_count = 0;
uint32_t new_crtcs_count = 0;
struct drm_crtc *crtc, *pcrtc;
struct drm_crtc_state *old_crtc_state;
- const struct dc_stream *commit_streams[MAX_STREAMS];
struct amdgpu_crtc *new_crtcs[MAX_STREAMS];
- const struct dc_stream *new_stream;
+ struct dc_stream *new_stream = NULL;
unsigned long flags;
bool wait_for_vblank = true;
-
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ struct dm_crtc_state *old_acrtc_state, *new_acrtc_state;
drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ dm_state = to_dm_atomic_state(state);
+
/* update changed items */
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
- struct amdgpu_crtc *acrtc;
- struct amdgpu_connector *aconnector = NULL;
- enum dm_commit_action action;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct drm_crtc_state *new_state = crtc->state;
-
- acrtc = to_amdgpu_crtc(crtc);
-
- aconnector =
- amdgpu_dm_find_first_crct_matching_connector(
- state,
- crtc,
- false);
+ new_acrtc_state = to_dm_crtc_state(new_state);
+ old_acrtc_state = to_dm_crtc_state(old_crtc_state);
+
+ DRM_DEBUG_KMS(
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
+ "planes_changed:%d, mode_changed:%d,active_changed:%d,"
+ "connectors_changed:%d\n",
+ acrtc->crtc_id,
+ new_state->enable,
+ new_state->active,
+ new_state->planes_changed,
+ new_state->mode_changed,
+ new_state->active_changed,
+ new_state->connectors_changed);
/* handles headless hotplug case, updating new_state and
* aconnector as needed
*/
- action = get_dm_commit_action(new_state);
-
- switch (action) {
- case DM_COMMIT_ACTION_DPMS_ON:
- case DM_COMMIT_ACTION_SET: {
- struct dm_connector_state *dm_state = NULL;
- new_stream = NULL;
-
- if (aconnector)
- dm_state = to_dm_connector_state(aconnector->base.state);
-
- new_stream = create_stream_for_sink(
- aconnector,
- &crtc->state->mode,
- dm_state);
+ if (modeset_required(new_state)) {
DRM_INFO("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
- if (!new_stream) {
+ if (!new_acrtc_state->stream) {
/*
- * this could happen because of issues with
- * userspace notifications delivery.
- * In this case userspace tries to set mode on
- * display which is disconnect in fact.
- * dc_sink in NULL in this case on aconnector.
- * We expect reset mode will come soon.
- *
- * This can also happen when unplug is done
- * during resume sequence ended
- *
- * In this case, we want to pretend we still
- * have a sink to keep the pipe running so that
- * hw state is consistent with the sw state
- */
+ * this could happen because of issues with
+ * userspace notifications delivery.
+ * In this case userspace tries to set mode on
+ * display which is disconnect in fact.
+ * dc_sink in NULL in this case on aconnector.
+ * We expect reset mode will come soon.
+ *
+ * This can also happen when unplug is done
+ * during resume sequence ended
+ *
+ * In this case, we want to pretend we still
+ * have a sink to keep the pipe running so that
+ * hw state is consistent with the sw state
+ */
DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
__func__, acrtc->base.base.id);
- break;
+ continue;
}
- if (acrtc->stream)
- remove_stream(adev, acrtc);
+
+ if (old_acrtc_state->stream)
+ remove_stream(adev, acrtc, old_acrtc_state->stream);
+
/*
* this loop saves set mode crtcs
* we needed to enable vblanks once all
* resources acquired in dc after dc_commit_streams
*/
+
+ /*TODO move all this into dm_crtc_state, get rid of
+ * new_crtcs array and use old and new atomic states
+ * instead
+ */
new_crtcs[new_crtcs_count] = acrtc;
new_crtcs_count++;
- acrtc->stream = new_stream;
acrtc->enabled = true;
acrtc->hw_mode = crtc->state->mode;
crtc->hwmode = crtc->state->mode;
-
- break;
- }
-
- case DM_COMMIT_ACTION_NOTHING: {
- struct dm_connector_state *dm_state = NULL;
-
- if (!aconnector)
- break;
-
- dm_state = to_dm_connector_state(aconnector->base.state);
-
- /* Scaling update */
- update_stream_scaling_settings(&crtc->state->mode,
- dm_state, acrtc->stream);
-
- break;
- }
- case DM_COMMIT_ACTION_DPMS_OFF:
- case DM_COMMIT_ACTION_RESET:
+ } else if (modereset_required(new_state)) {
DRM_INFO("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
- /* i.e. reset mode */
- if (acrtc->stream)
- remove_stream(adev, acrtc);
- break;
- } /* switch() */
- } /* for_each_crtc_in_state() */
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
-
- if (acrtc->stream) {
- commit_streams[commit_streams_count] = acrtc->stream;
- ++commit_streams_count;
+ /* i.e. reset mode */
+ if (old_acrtc_state->stream)
+ remove_stream(adev, acrtc, old_acrtc_state->stream);
}
- }
+ } /* for_each_crtc_in_state() */
/*
* Add streams after required streams from new and replaced streams
if (adev->dm.freesync_module) {
for (i = 0; i < new_crtcs_count; i++) {
struct amdgpu_connector *aconnector = NULL;
- new_stream = new_crtcs[i]->stream;
+ new_acrtc_state = to_dm_crtc_state(new_crtcs[i]->base.state);
+
+ new_stream = new_acrtc_state->stream;
aconnector =
amdgpu_dm_find_first_crct_matching_connector(
state,
}
}
- /* DC is optimized not to do anything if 'streams' didn't change. */
- WARN_ON(!dc_commit_streams(dm->dc, commit_streams, commit_streams_count));
+ if (dm_state->context)
+ WARN_ON(!dc_commit_context(dm->dc, dm_state->context));
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ new_acrtc_state = to_dm_crtc_state(crtc->state);
- if (acrtc->stream != NULL)
- acrtc->otg_inst =
- dc_stream_get_status(acrtc->stream)->primary_otg_inst;
+ if (new_acrtc_state->stream != NULL) {
+ const struct dc_stream_status *status =
+ dc_stream_get_status(new_acrtc_state->stream);
+
+ if (!status)
+ DC_ERR("got no status for stream %p on acrtc%p\n", new_acrtc_state->stream, acrtc);
+ else
+ acrtc->otg_inst = status->primary_otg_inst;
+ }
}
- /* update planes when needed per crtc*/
- for_each_crtc_in_state(state, pcrtc, old_crtc_state, j)
- amdgpu_dm_commit_surfaces(state, dev, dm, pcrtc, &wait_for_vblank);
+ /* Handle scaling and underscan changes */
+ for_each_connector_in_state(state, connector, old_conn_state, i) {
+ struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+ struct dm_connector_state *con_new_state =
+ to_dm_connector_state(aconnector->base.state);
+ struct dm_connector_state *con_old_state =
+ to_dm_connector_state(old_conn_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(con_new_state->base.crtc);
+ struct dc_stream_status *status = NULL;
+
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(acrtc->base.state))
+ continue;
+
+ /* Skip anything that is not a scale or underscan change */
+ if (!is_scaling_state_different(con_new_state, con_old_state))
+ continue;
+
+ new_acrtc_state = to_dm_crtc_state(acrtc->base.state);
+
+ update_stream_scaling_settings(&con_new_state->base.crtc->mode,
+ con_new_state, (struct dc_stream *)new_acrtc_state->stream);
+
+ status = dc_stream_get_status(new_acrtc_state->stream);
+ WARN_ON(!status);
+ WARN_ON(!status->surface_count);
+
+ if (!new_acrtc_state->stream)
+ continue;
+
+ /*TODO How it works with MPO ?*/
+ if (!dc_commit_surfaces_to_stream(
+ dm->dc,
+ status->surfaces,
+ status->surface_count,
+ new_acrtc_state->stream))
+ dm_error("%s: Failed to update stream scaling!\n", __func__);
+ }
for (i = 0; i < new_crtcs_count; i++) {
/*
* loop to enable interrupts on newly arrived crtc
*/
struct amdgpu_crtc *acrtc = new_crtcs[i];
+ new_acrtc_state = to_dm_crtc_state(acrtc->base.state);
if (adev->dm.freesync_module)
mod_freesync_notify_mode_change(
- adev->dm.freesync_module, &acrtc->stream, 1);
+ adev->dm.freesync_module, &new_acrtc_state->stream, 1);
manage_dm_interrupts(adev, acrtc, true);
- dm_crtc_cursor_reset(&acrtc->base);
}
+ /* update planes when needed per crtc*/
+ for_each_crtc_in_state(state, pcrtc, old_crtc_state, j) {
+ new_acrtc_state = to_dm_crtc_state(pcrtc->state);
+
+ if (new_acrtc_state->stream)
+ amdgpu_dm_commit_surfaces(state, dev, dm, pcrtc, &wait_for_vblank);
+ }
- /*TODO mark consumed event on all crtc assigned event
- * in drm_atomic_helper_setup_commit just to signal completion
+
+ /*
+ * send vblank event on all events not handled in flip and
+ * mark consumed event for drm_atomic_helper_commit_hw_done
*/
spin_lock_irqsave(&adev->ddev->event_lock, flags);
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- if (acrtc->base.state->event &&
- acrtc->base.state->event->event.base.type != DRM_EVENT_FLIP_COMPLETE) {
- acrtc->event = acrtc->base.state->event;
- acrtc->base.state->event = NULL;
- }
+ if (acrtc->base.state->event)
+ drm_send_event_locked(dev, &crtc->state->event->base);
+
+ acrtc->base.state->event = NULL;
}
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
if (wait_for_vblank)
drm_atomic_helper_wait_for_vblanks(dev, state);
- /*TODO send vblank event on all crtc assigned event
- * in drm_atomic_helper_setup_commit just to signal completion
- */
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
- for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
-
- if (acrtc->event &&
- acrtc->event->event.base.type != DRM_EVENT_FLIP_COMPLETE) {
- drm_send_event_locked(dev, &acrtc->event->base);
- acrtc->event = NULL;
- }
- }
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
-
- /*TODO Is it to early if actual flip haven't happened yet ?*/
- /* Release old FB */
drm_atomic_helper_cleanup_planes(dev, state);
}
{
struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
struct amdgpu_crtc *disconnected_acrtc;
+ struct dm_crtc_state *acrtc_state;
if (!aconnector->dc_sink || !connector->state || !connector->encoder)
return;
disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
- if (!disconnected_acrtc || !disconnected_acrtc->stream)
+ if (!disconnected_acrtc || !acrtc_state->stream)
return;
/*
* we deduce we are in a state where we can not rely on usermode call
* to turn on the display, so we do it here
*/
- if (disconnected_acrtc->stream->sink != aconnector->dc_sink)
+ if (acrtc_state->stream->sink != aconnector->dc_sink)
dm_force_atomic_commit(&aconnector->base);
}
struct dc_validation_set *val_sets,
uint32_t set_count,
const struct dc_stream *stream,
- const struct dc_surface *surface)
+ struct dc_surface *surface)
{
uint32_t i = 0, j = 0;
static uint32_t update_in_val_sets_stream(
struct dc_validation_set *val_sets,
- struct drm_crtc **crtcs,
uint32_t set_count,
- const struct dc_stream *old_stream,
- const struct dc_stream *new_stream,
+ struct dc_stream *old_stream,
+ struct dc_stream *new_stream,
struct drm_crtc *crtc)
{
uint32_t i = 0;
}
val_sets[i].stream = new_stream;
- crtcs[i] = crtc;
- if (i == set_count) {
+ if (i == set_count)
/* nothing found. add new one to the end */
return set_count + 1;
- }
return set_count;
}
return set_count;
}
+/*
+ * Grabs all modesetting locks to serialize against any blocking commits,
+ * Waits for completion of all non blocking commits.
+ */
+static int do_aquire_global_lock(
+ struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_commit *commit;
+ long ret;
-static enum surface_update_type amdgpu_dm_check_surfaces_update_type(
- struct dc *dc,
- const struct dc_surface **new_surfaces,
- uint8_t new_surface_count,
- const struct dc_stream *dc_stream)
-{
- struct dc_surface_update srf_updates[MAX_SURFACES];
- struct dc_flip_addrs flip_addr[MAX_SURFACES];
- struct dc_plane_info plane_info[MAX_SURFACES];
- struct dc_scaling_info scaling_info[MAX_SURFACES];
- int i;
- const struct dc_stream_status *stream_status =
- dc_stream_get_status(dc_stream);
- enum surface_update_type update_type;
+ /* Adding all modeset locks to acquire_ctx will
+ * ensure that when the framework releases it the
+ * extra locks we are locking here will get released too
+ */
+ ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
+ if (ret)
+ return ret;
- ASSERT(stream_status);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit, commit_entry);
+ if (commit)
+ drm_crtc_commit_get(commit);
+ spin_unlock(&crtc->commit_lock);
+
+ if (!commit)
+ continue;
+ /* Make sure all pending HW programming completed and
+ * page flips done
+ */
+ ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
- memset(srf_updates, 0, sizeof(srf_updates));
- memset(flip_addr, 0, sizeof(flip_addr));
- memset(plane_info, 0, sizeof(plane_info));
- memset(scaling_info, 0, sizeof(scaling_info));
+ if (ret > 0)
+ ret = wait_for_completion_interruptible_timeout(
+ &commit->flip_done, 10*HZ);
- for (i = 0; i < new_surface_count; i++) {
- srf_updates[i].surface = new_surfaces[i];
- srf_updates[i].gamma =
- (struct dc_gamma *)new_surfaces[i]->gamma_correction;
- flip_addr[i].address = new_surfaces[i]->address;
- flip_addr[i].flip_immediate = new_surfaces[i]->flip_immediate;
- plane_info[i].color_space = new_surfaces[i]->color_space;
- plane_info[i].format = new_surfaces[i]->format;
- plane_info[i].plane_size = new_surfaces[i]->plane_size;
- plane_info[i].rotation = new_surfaces[i]->rotation;
- plane_info[i].horizontal_mirror = new_surfaces[i]->horizontal_mirror;
- plane_info[i].stereo_format = new_surfaces[i]->stereo_format;
- plane_info[i].tiling_info = new_surfaces[i]->tiling_info;
- plane_info[i].visible = new_surfaces[i]->visible;
- plane_info[i].dcc = new_surfaces[i]->dcc;
- scaling_info[i].scaling_quality = new_surfaces[i]->scaling_quality;
- scaling_info[i].src_rect = new_surfaces[i]->src_rect;
- scaling_info[i].dst_rect = new_surfaces[i]->dst_rect;
- scaling_info[i].clip_rect = new_surfaces[i]->clip_rect;
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
+ "timed out\n", crtc->base.id, crtc->name);
- srf_updates[i].flip_addr = &flip_addr[i];
- srf_updates[i].plane_info = &plane_info[i];
- srf_updates[i].scaling_info = &scaling_info[i];
+ drm_crtc_commit_put(commit);
}
- update_type = dc_check_update_surfaces_for_stream(
- dc, srf_updates, new_surface_count, NULL, stream_status);
-
- return update_type;
+ return ret < 0 ? ret : 0;
}
int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
+ struct dm_atomic_state *dm_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
int i, j;
int ret;
- int set_count;
- int new_stream_count;
- struct dc_validation_set set[MAX_STREAMS] = {{ 0 }};
- struct dc_stream *new_streams[MAX_STREAMS] = { 0 };
- struct drm_crtc *crtc_set[MAX_STREAMS] = { 0 };
struct amdgpu_device *adev = dev->dev_private;
struct dc *dc = adev->dm.dc;
- bool need_to_validate = false;
- struct validate_context *context;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ int set_count;
+ struct dc_validation_set set[MAX_STREAMS] = { { 0 } };
+ struct dm_crtc_state *old_acrtc_state, *new_acrtc_state;
+
/*
* This bool will be set for true for any modeset/reset
- * or surface update which implies non fast surfae update.
+ * or surface update which implies non fast surface update.
*/
- bool wait_for_prev_commits = false;
+ bool lock_and_validation_needed = false;
- ret = drm_atomic_helper_check(dev, state);
+ ret = drm_atomic_helper_check_modeset(dev, state);
if (ret) {
- DRM_ERROR("Atomic state validation failed with error :%d !\n",
- ret);
+ DRM_ERROR("Atomic state validation failed with error :%d !\n", ret);
return ret;
}
- ret = -EINVAL;
+ dm_state = to_dm_atomic_state(state);
/* copy existing configuration */
- new_stream_count = 0;
set_count = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ old_acrtc_state = to_dm_crtc_state(crtc->state);
- if (acrtc->stream) {
- set[set_count].stream = acrtc->stream;
- crtc_set[set_count] = crtc;
+ if (old_acrtc_state->stream) {
+ dc_stream_retain(old_acrtc_state->stream);
+ set[set_count].stream = old_acrtc_state->stream;
++set_count;
}
}
+ /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
/* update changed items */
for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct amdgpu_crtc *acrtc = NULL;
struct amdgpu_connector *aconnector = NULL;
- enum dm_commit_action action;
-
+ old_acrtc_state = to_dm_crtc_state(crtc->state);
+ new_acrtc_state = to_dm_crtc_state(crtc_state);
acrtc = to_amdgpu_crtc(crtc);
aconnector = amdgpu_dm_find_first_crct_matching_connector(state, crtc, true);
- action = get_dm_commit_action(crtc_state);
+ DRM_DEBUG_KMS(
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
+ "planes_changed:%d, mode_changed:%d,active_changed:%d,"
+ "connectors_changed:%d\n",
+ acrtc->crtc_id,
+ crtc_state->enable,
+ crtc_state->active,
+ crtc_state->planes_changed,
+ crtc_state->mode_changed,
+ crtc_state->active_changed,
+ crtc_state->connectors_changed);
+
+ if (modeset_required(crtc_state)) {
- switch (action) {
- case DM_COMMIT_ACTION_DPMS_ON:
- case DM_COMMIT_ACTION_SET: {
struct dc_stream *new_stream = NULL;
struct drm_connector_state *conn_state = NULL;
- struct dm_connector_state *dm_state = NULL;
+ struct dm_connector_state *dm_conn_state = NULL;
if (aconnector) {
conn_state = drm_atomic_get_connector_state(state, &aconnector->base);
- if (IS_ERR(conn_state))
- return ret;
- dm_state = to_dm_connector_state(conn_state);
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR_OR_ZERO(conn_state);
+ goto fail;
+ }
+
+ dm_conn_state = to_dm_connector_state(conn_state);
}
- new_stream = create_stream_for_sink(aconnector, &crtc_state->mode, dm_state);
+ new_stream = create_stream_for_sink(aconnector, &crtc_state->mode, dm_conn_state);
/*
* we can have no stream on ACTION_SET if a display
break;
}
- new_streams[new_stream_count] = new_stream;
- set_count = update_in_val_sets_stream(
- set,
- crtc_set,
- set_count,
- acrtc->stream,
- new_stream,
- crtc);
-
- new_stream_count++;
- need_to_validate = true;
- wait_for_prev_commits = true;
- break;
- }
-
- case DM_COMMIT_ACTION_NOTHING: {
- const struct drm_connector *drm_connector = NULL;
- struct drm_connector_state *conn_state = NULL;
- struct dm_connector_state *dm_state = NULL;
- struct dm_connector_state *old_dm_state = NULL;
- struct dc_stream *new_stream;
+ if (new_acrtc_state->stream)
+ dc_stream_release(new_acrtc_state->stream);
- if (!aconnector)
- break;
-
- for_each_connector_in_state(
- state, drm_connector, conn_state, j) {
- if (&aconnector->base == drm_connector)
- break;
- }
+ new_acrtc_state->stream = new_stream;
- old_dm_state = to_dm_connector_state(drm_connector->state);
- dm_state = to_dm_connector_state(conn_state);
-
- /* Support underscan adjustment*/
- if (!is_scaling_state_different(dm_state, old_dm_state))
- break;
-
- new_stream = create_stream_for_sink(aconnector, &crtc_state->mode, dm_state);
-
- if (!new_stream) {
- DRM_ERROR("%s: Failed to create new stream for crtc %d\n",
- __func__, acrtc->base.base.id);
- break;
- }
-
- new_streams[new_stream_count] = new_stream;
set_count = update_in_val_sets_stream(
set,
- crtc_set,
set_count,
- acrtc->stream,
- new_stream,
+ old_acrtc_state->stream,
+ new_acrtc_state->stream,
crtc);
- new_stream_count++;
- need_to_validate = true;
- wait_for_prev_commits = true;
+ lock_and_validation_needed = true;
+
+ } else if (modereset_required(crtc_state)) {
- break;
- }
- case DM_COMMIT_ACTION_DPMS_OFF:
- case DM_COMMIT_ACTION_RESET:
/* i.e. reset mode */
- if (acrtc->stream) {
+ if (new_acrtc_state->stream) {
set_count = remove_from_val_sets(
set,
set_count,
- acrtc->stream);
- wait_for_prev_commits = true;
+ new_acrtc_state->stream);
+
+ dc_stream_release(new_acrtc_state->stream);
+ new_acrtc_state->stream = NULL;
+
+ lock_and_validation_needed = true;
}
- break;
}
+
/*
- * TODO revisit when removing commit action
- * and looking at atomic flags directly
+ * Hack: Commit needs planes right now, specifically for gamma
+ * TODO rework commit to check CRTC for gamma change
*/
+ if (crtc_state->color_mgmt_changed) {
- /* commit needs planes right now (for gamma, eg.) */
- /* TODO rework commit to chack crtc for gamma change */
- ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
- return ret;
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ goto fail;
+ }
+ }
+
+ /* Check scaling and underscan changes */
+ /*TODO Removed scaling changes validation due to inability to commit
+ * new stream into context w\o causing full reset. Need to
+ * decide how to handle.
+ */
+ for_each_connector_in_state(state, connector, conn_state, i) {
+ struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+ struct dm_connector_state *con_old_state =
+ to_dm_connector_state(aconnector->base.state);
+ struct dm_connector_state *con_new_state =
+ to_dm_connector_state(conn_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(con_new_state->base.crtc);
+
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(acrtc->base.state))
+ continue;
- ret = -EINVAL;
+ /* Skip anything that is not a scale or underscan change */
+ if (!is_scaling_state_different(con_new_state, con_old_state))
+ continue;
+
+ lock_and_validation_needed = true;
}
- for (i = 0; i < set_count; i++) {
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ new_acrtc_state = to_dm_crtc_state(crtc_state);
+
for_each_plane_in_state(state, plane, plane_state, j) {
- struct drm_crtc *crtc = plane_state->crtc;
+ struct drm_crtc *plane_crtc = plane_state->crtc;
struct drm_framebuffer *fb = plane_state->fb;
- struct drm_connector *connector;
- struct dm_connector_state *dm_state = NULL;
- enum dm_commit_action action;
- struct drm_crtc_state *crtc_state;
bool pflip_needed;
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
+ /*TODO Implement atomic check for cursor plane */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
- if (!fb || !crtc || crtc_set[i] != crtc ||
- !crtc->state->planes_changed || !crtc->state->active)
+ if (!fb || !plane_crtc || crtc != plane_crtc || !crtc_state->active)
continue;
- action = get_dm_commit_action(crtc->state);
+ WARN_ON(!new_acrtc_state->stream);
- /* Surfaces are created under two scenarios:
- * 1. This commit is not a page flip.
- * 2. This commit is a page flip, and streams are created.
- */
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
pflip_needed = !state->allow_modeset;
- if (!pflip_needed ||
- action == DM_COMMIT_ACTION_DPMS_ON ||
- action == DM_COMMIT_ACTION_SET) {
+ if (!pflip_needed) {
struct dc_surface *surface;
- list_for_each_entry(connector,
- &dev->mode_config.connector_list, head) {
- if (connector->state->crtc == crtc) {
- dm_state = to_dm_connector_state(
- connector->state);
- break;
- }
- }
-
- /*
- * This situation happens in the following case:
- * we are about to get set mode for connector who's only
- * possible crtc (in encoder crtc mask) is used by
- * another connector, that is why it will try to
- * re-assing crtcs in order to make configuration
- * supported. For our implementation we need to make all
- * encoders support all crtcs, then this issue will
- * never arise again. But to guard code from this issue
- * check is left.
- *
- * Also it should be needed when used with actual
- * drm_atomic_commit ioctl in future
- */
- if (!dm_state)
- continue;
-
surface = dc_create_surface(dc);
- fill_plane_attributes(
- crtc->dev->dev_private,
+
+ ret = fill_plane_attributes(
+ plane_crtc->dev->dev_private,
surface,
plane_state,
+ crtc_state,
false);
+ if (ret)
+ goto fail;
- add_val_sets_surface(
- set,
- set_count,
- set[i].stream,
- surface);
- need_to_validate = true;
- }
- }
- }
+ if (dm_plane_state->surface)
+ dc_surface_release(dm_plane_state->surface);
- context = dc_get_validate_context(dc, set, set_count);
+ dm_plane_state->surface = surface;
- for (i = 0; i < set_count; i++) {
- for (j = 0; j < set[i].surface_count; j++) {
- if (amdgpu_dm_check_surfaces_update_type(
- dc,
- set[i].surfaces,
- set[i].surface_count,
- set[i].stream) > UPDATE_TYPE_MED) {
- wait_for_prev_commits = true;
- break;
+ add_val_sets_surface(set,
+ set_count,
+ new_acrtc_state->stream,
+ surface);
+
+ lock_and_validation_needed = true;
}
}
}
- if (need_to_validate == false || set_count == 0 || context) {
+ /* Run this here since we want to validate the streams we created */
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ goto fail;
- ret = 0;
- /*
- * For full updates case when
- * removing/adding/updateding streams on once CRTC while flipping
- * on another CRTC,
- * Adding all current active CRTC's states to the atomic commit in
- * amdgpu_dm_atomic_check will guarantee that any such full update commit
- * will wait for completion of any outstanding flip using DRMs
- * synchronization events.
- */
- if (wait_for_prev_commits) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- struct drm_crtc_state *crtc_state;
-
- if (acrtc->stream) {
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state)) {
- ret = PTR_ERR(crtc_state);
- break;
- }
- }
- }
- }
- }
+ /*
+ * For full updates case when
+ * removing/adding/updating streams on one CRTC while flipping
+ * on another CRTC,
+ * acquiring global lock will guarantee that any such full
+ * update commit
+ * will wait for completion of any outstanding flip using DRMs
+ * synchronization events.
+ */
- if (context) {
- dc_resource_validate_ctx_destruct(context);
- dm_free(context);
- }
+ if (lock_and_validation_needed) {
- for (i = 0; i < set_count; i++)
- for (j = 0; j < set[i].surface_count; j++)
- dc_surface_release(set[i].surfaces[j]);
+ ret = do_aquire_global_lock(dev, state);
+ if (ret)
+ goto fail;
+ WARN_ON(dm_state->context);
+ dm_state->context = dc_get_validate_context(dc, set, set_count);
+ if (!dm_state->context) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
- for (i = 0; i < new_stream_count; i++)
- dc_stream_release(new_streams[i]);
+ /* Must be success */
+ WARN_ON(ret);
+ return ret;
- if (ret != 0)
- DRM_ERROR("Atomic check failed.\n");
+fail:
+ if (ret == -EDEADLK)
+ DRM_DEBUG_KMS("Atomic check stopped due to to deadlock.\n");
+ else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
+ DRM_DEBUG_KMS("Atomic check stopped due to to signal.\n");
+ else
+ DRM_ERROR("Atomic check failed with err: %d .\n", ret);
return ret;
}