drm/vmwgfx: Add kms helpers for dirty- and readback functions
author Thomas Hellstrom <thellstrom@vmware.com>
Fri, 26 Jun 2015 09:03:53 +0000 (02:03 -0700)
committer Thomas Hellstrom <thellstrom@vmware.com>
Wed, 5 Aug 2015 12:01:08 +0000 (14:01 +0200)
We need to make the dirty- and readback functions callable without a struct
drm_file pointer. We also need to unify the handling of dirty- and readback
cliprects, which is currently implemented in various places across the kms
system, so add helpers to facilitate this.
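
As a rough usage sketch (not part of this patch; the callback names
example_clip/example_fifo_commit and struct example_blit_cmd are
hypothetical), a display unit dirty implementation is expected to drive
the new helpers roughly like this:

static int example_surface_dirty(struct vmw_private *dev_priv,
                                 struct vmw_framebuffer *framebuffer,
                                 struct vmw_resource *srf,
                                 const struct drm_clip_rect *clips,
                                 unsigned int num_clips)
{
        struct vmw_kms_dirty dirty;
        int ret;

        /* Reserve and validate the surface and its backup buffer. */
        ret = vmw_kms_helper_resource_prepare(srf, true);
        if (ret)
                return ret;

        memset(&dirty, 0, sizeof(dirty));
        dirty.clip = example_clip;               /* hypothetical callback */
        dirty.fifo_commit = example_fifo_commit; /* hypothetical callback */
        dirty.fifo_reserve_size = sizeof(struct example_blit_cmd) * num_clips;

        /* Translate the cliprects per display unit and emit blit commands. */
        ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
                                   0, 0, num_clips, 1, &dirty);
        if (ret) {
                vmw_kms_helper_resource_revert(srf);
                return ret;
        }

        /* Fence the backup buffer, then unreserve and unlock. */
        vmw_kms_helper_resource_finish(srf, NULL);
        return 0;
}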

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c

index 12eaa6c805d88d7c486815f9232a937ef5352160..7504f92c767c0c30a5069711554176bedfc72775 100644 (file)
@@ -640,7 +640,8 @@ extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
 extern struct vmw_resource *
 vmw_resource_reference_unless_doomed(struct vmw_resource *res);
 extern int vmw_resource_validate(struct vmw_resource *res);
-extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
+extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
+                               bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
 extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                                  struct ttm_object_file *tfile,
@@ -860,6 +861,11 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
                                        *user_fence_rep,
                                        struct vmw_fence_obj *fence,
                                        uint32_t fence_handle);
+extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+                                     struct ttm_buffer_object *bo,
+                                     bool interruptible,
+                                     bool validate_as_mob);
+
 
 /**
  * IRQs and waiting - vmwgfx_irq.c
@@ -965,7 +971,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
 int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle);
-extern int vmw_resource_pin(struct vmw_resource *res);
+extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
 extern void vmw_resource_unpin(struct vmw_resource *res);
 
 /**
index 92e89987b0d73c4db2ed4e119ca7e34e7e3c0e03..698a0e2add5396e249cd60157199a163743ece0f 100644 (file)
@@ -375,7 +375,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;
 
-               ret = vmw_resource_reserve(res, val->no_buffer_needed);
+               ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
                if (unlikely(ret != 0))
                        return ret;
 
@@ -2234,9 +2234,10 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
                (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
 }
 
-static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
-                                     struct ttm_buffer_object *bo,
-                                     bool validate_as_mob)
+int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+                              struct ttm_buffer_object *bo,
+                              bool interruptible,
+                              bool validate_as_mob)
 {
        struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
                                                  base);
@@ -2246,7 +2247,8 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
                return 0;
 
        if (validate_as_mob)
-               return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
+               return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
+                                      false);
 
        /**
         * Put BO in VRAM if there is space, otherwise as a GMR.
@@ -2255,7 +2257,8 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
         * used as a GMR, this will return -ENOMEM.
         */
 
-       ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
+       ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
+                             false);
        if (likely(ret == 0 || ret == -ERESTARTSYS))
                return ret;
 
@@ -2264,8 +2267,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
         * previous contents.
         */
 
-       DRM_INFO("Falling through to VRAM.\n");
-       ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+       ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
        return ret;
 }
 
@@ -2277,6 +2279,7 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv,
 
        list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
                ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
+                                                true,
                                                 entry->validate_as_mob);
                if (unlikely(ret != 0))
                        return ret;
index 99e2f5b9a023aea72d56a95a69e37c8791d5c076..c46c68846f0e9a4364b250cbabb17247100cc01e 100644 (file)
@@ -1821,3 +1821,295 @@ out_free:
        kfree(rects);
        return ret;
 }
+
+/**
+ * vmw_kms_helper_dirty - Helper to build commands and perform actions based
+ * on a set of cliprects and a set of display units.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @framebuffer: Pointer to the framebuffer on which to perform the actions.
+ * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
+ * Cliprects are given in framebuffer coordinates.
+ * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
+ * be NULL. Cliprects are given in source coordinates.
+ * @dest_x: X coordinate offset for the crtc / destination clip rects.
+ * @dest_y: Y coordinate offset for the crtc / destination clip rects.
+ * @num_clips: Number of cliprects in the @clips or @vclips array.
+ * @increment: Integer with which to increment the clip counter when looping.
+ * Used to skip a predetermined number of clip rects.
+ * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
+ */
+int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+                        struct vmw_framebuffer *framebuffer,
+                        const struct drm_clip_rect *clips,
+                        const struct drm_vmw_rect *vclips,
+                        s32 dest_x, s32 dest_y,
+                        int num_clips,
+                        int increment,
+                        struct vmw_kms_dirty *dirty)
+{
+       struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+       struct drm_crtc *crtc;
+       u32 num_units = 0;
+       u32 i, k;
+
+       dirty->dev_priv = dev_priv;
+
+       list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
+               if (crtc->primary->fb != &framebuffer->base)
+                       continue;
+               units[num_units++] = vmw_crtc_to_du(crtc);
+       }
+
+       for (k = 0; k < num_units; k++) {
+               struct vmw_display_unit *unit = units[k];
+               s32 crtc_x = unit->crtc.x;
+               s32 crtc_y = unit->crtc.y;
+               s32 crtc_width = unit->crtc.mode.hdisplay;
+               s32 crtc_height = unit->crtc.mode.vdisplay;
+               const struct drm_clip_rect *clips_ptr = clips;
+               const struct drm_vmw_rect *vclips_ptr = vclips;
+
+               dirty->unit = unit;
+               if (dirty->fifo_reserve_size > 0) {
+                       dirty->cmd = vmw_fifo_reserve(dev_priv,
+                                                     dirty->fifo_reserve_size);
+                       if (!dirty->cmd) {
+                               DRM_ERROR("Couldn't reserve fifo space "
+                                         "for dirty blits.\n");
+                               return -ENOMEM;
+                       }
+                       memset(dirty->cmd, 0, dirty->fifo_reserve_size);
+               }
+               dirty->num_hits = 0;
+               for (i = 0; i < num_clips; i++, clips_ptr += increment,
+                      vclips_ptr += increment) {
+                       s32 clip_left;
+                       s32 clip_top;
+
+                       /*
+                        * Select clip array type. Note that integer type
+                        * in @clips is unsigned short, whereas in @vclips
+                        * it's 32-bit.
+                        */
+                       if (clips) {
+                               dirty->fb_x = (s32) clips_ptr->x1;
+                               dirty->fb_y = (s32) clips_ptr->y1;
+                               dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
+                                       crtc_x;
+                               dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
+                                       crtc_y;
+                       } else {
+                               dirty->fb_x = vclips_ptr->x;
+                               dirty->fb_y = vclips_ptr->y;
+                               dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
+                                       dest_x - crtc_x;
+                               dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
+                                       dest_y - crtc_y;
+                       }
+
+                       dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
+                       dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
+
+                       /* Skip this clip if it's outside the crtc region */
+                       if (dirty->unit_x1 >= crtc_width ||
+                           dirty->unit_y1 >= crtc_height ||
+                           dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
+                               continue;
+
+                       /* Clip right and bottom to crtc limits */
+                       dirty->unit_x2 = min_t(s32, dirty->unit_x2,
+                                              crtc_width);
+                       dirty->unit_y2 = min_t(s32, dirty->unit_y2,
+                                              crtc_height);
+
+                       /* Clip left and top to crtc limits */
+                       clip_left = min_t(s32, dirty->unit_x1, 0);
+                       clip_top = min_t(s32, dirty->unit_y1, 0);
+                       dirty->unit_x1 -= clip_left;
+                       dirty->unit_y1 -= clip_top;
+                       dirty->fb_x -= clip_left;
+                       dirty->fb_y -= clip_top;
+
+                       dirty->clip(dirty);
+               }
+
+               dirty->fifo_commit(dirty);
+       }
+
+       return 0;
+}
+
+/**
+ * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
+ * command submission.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @buf: The buffer object
+ * @interruptible: Whether to perform waits as interruptible.
+ * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
+ * the buffer will be validated as a GMR. Already pinned buffers will not be
+ * validated.
+ *
+ * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
+ * interrupted by a signal.
+ */
+int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
+                                 struct vmw_dma_buffer *buf,
+                                 bool interruptible,
+                                 bool validate_as_mob)
+{
+       struct ttm_buffer_object *bo = &buf->base;
+       int ret;
+
+       ttm_bo_reserve(bo, interruptible, false, false, NULL);
+       ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
+                                        validate_as_mob);
+       if (ret)
+               ttm_bo_unreserve(bo);
+
+       return ret;
+}
+
+/**
+ * vmw_kms_helper_buffer_revert - Undo the actions of
+ * vmw_kms_helper_buffer_prepare.
+ *
+ * @buf: Pointer to the buffer object.
+ *
+ * Helper to be used if an error forces the caller to undo the actions of
+ * vmw_kms_helper_buffer_prepare.
+ */
+void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
+{
+       if (buf)
+               ttm_bo_unreserve(&buf->base);
+}
+
+/**
+ * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
+ * kms command submission.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @file_priv: Pointer to a struct drm_file representing the caller's
+ * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
+ * if non-NULL, @user_fence_rep must be non-NULL.
+ * @buf: The buffer object.
+ * @out_fence:  Optional pointer to a fence pointer. If non-NULL, a
+ * ref-counted fence pointer is returned here.
+ * @user_fence_rep: Optional pointer to a user-space provided struct
+ * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
+ * function copies fence data to user-space in a fail-safe manner.
+ */
+void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
+                                 struct drm_file *file_priv,
+                                 struct vmw_dma_buffer *buf,
+                                 struct vmw_fence_obj **out_fence,
+                                 struct drm_vmw_fence_rep __user *
+                                 user_fence_rep)
+{
+       struct vmw_fence_obj *fence;
+       uint32_t handle;
+       int ret;
+
+       ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
+                                        file_priv ? &handle : NULL);
+       if (buf)
+               vmw_fence_single_bo(&buf->base, fence);
+       if (file_priv)
+               vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
+                                           ret, user_fence_rep, fence,
+                                           handle);
+       if (out_fence)
+               *out_fence = fence;
+       else
+               vmw_fence_obj_unreference(&fence);
+
+       vmw_kms_helper_buffer_revert(buf);
+}
+
+
+/**
+ * vmw_kms_helper_resource_revert - Undo the actions of
+ * vmw_kms_helper_resource_prepare.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ *
+ * Helper to be used if an error forces the caller to undo the actions of
+ * vmw_kms_helper_resource_prepare.
+ */
+void vmw_kms_helper_resource_revert(struct vmw_resource *res)
+{
+       vmw_kms_helper_buffer_revert(res->backup);
+       vmw_resource_unreserve(res, NULL, 0);
+       mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+}
+
+/**
+ * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
+ * command submission.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ * @interruptible: Whether to perform waits as interruptible.
+ *
+ * Also reserves and validates the backup buffer of guest-backed resources.
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted by a signal.
+ */
+int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
+                                   bool interruptible)
+{
+       int ret = 0;
+
+       if (interruptible)
+               ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
+       else
+               mutex_lock(&res->dev_priv->cmdbuf_mutex);
+
+       if (unlikely(ret != 0))
+               return -ERESTARTSYS;
+
+       ret = vmw_resource_reserve(res, interruptible, false);
+       if (ret)
+               goto out_unlock;
+
+       if (res->backup) {
+               ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
+                                                   interruptible,
+                                                   res->dev_priv->has_mob);
+               if (ret)
+                       goto out_unreserve;
+       }
+       ret = vmw_resource_validate(res);
+       if (ret)
+               goto out_revert;
+       return 0;
+
+out_revert:
+       vmw_kms_helper_buffer_revert(res->backup);
+out_unreserve:
+       vmw_resource_unreserve(res, NULL, 0);
+out_unlock:
+       mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+       return ret;
+}
+
+/**
+ * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
+ * kms command submission.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
+ * ref-counted fence pointer is returned here.
+ */
+void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+                            struct vmw_fence_obj **out_fence)
+{
+       if (res->backup || out_fence)
+               vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
+                                            out_fence, NULL);
+
+       vmw_resource_unreserve(res, NULL, 0);
+       mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+}
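
To make the closure interface concrete, here is a minimal sketch of the two
callbacks a display unit might plug into struct vmw_kms_dirty. The command
layout (struct example_blit_cmd) and the callback names are illustrative
assumptions, not part of this patch; a real implementation emits whatever
SVGA command its unit type requires.

struct example_blit_cmd {               /* hypothetical command layout */
        u32 header;
        s32 src_x, src_y;               /* framebuffer coordinates */
        s32 dst_x1, dst_y1;             /* display unit (crtc) coordinates */
        s32 dst_x2, dst_y2;
};

static void example_clip(struct vmw_kms_dirty *dirty)
{
        struct example_blit_cmd *cmd = dirty->cmd;

        /*
         * One command per surviving cliprect; the helper has already
         * translated and clipped the rect to the current display unit.
         */
        cmd += dirty->num_hits;
        cmd->src_x = dirty->fb_x;
        cmd->src_y = dirty->fb_y;
        cmd->dst_x1 = dirty->unit_x1;
        cmd->dst_y1 = dirty->unit_y1;
        cmd->dst_x2 = dirty->unit_x2;
        cmd->dst_y2 = dirty->unit_y2;
        dirty->num_hits++;
}

static void example_fifo_commit(struct vmw_kms_dirty *dirty)
{
        /* Commit only the fifo space actually used by the emitted commands. */
        vmw_fifo_commit(dirty->dev_priv,
                        sizeof(struct example_blit_cmd) * dirty->num_hits);
}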
index db8ae94c403c6f2e4987037fc05689df6f8dfc18..c19a515b139b144979966e163ea5feb4a3607d57 100644 (file)
 #include <drm/drm_crtc_helper.h>
 #include "vmwgfx_drv.h"
 
-
+/**
+ * struct vmw_kms_dirty - closure structure for the vmw_kms_helper_dirty
+ * function.
+ *
+ * @fifo_commit: Callback that is called once for each display unit after
+ * all clip rects. This function must commit the fifo space reserved by the
+ * helper. Set up by the caller.
+ * @clip: Callback that is called for each cliprect on each display unit.
+ * Set up by the caller.
+ * @fifo_reserve_size: Fifo size that the helper should try to allocate for
+ * each display unit. Set up by the caller.
+ * @dev_priv: Pointer to the device private. Set up by the helper.
+ * @unit: The current display unit. Set up by the helper before a call to @clip.
+ * @cmd: The allocated fifo space. Set up by the helper before the first @clip
+ * call.
+ * @num_hits: Number of clip rect commands for this display unit.
+ * Cleared by the helper before the first @clip call. Updated by the @clip
+ * callback.
+ * @fb_x: Clip rect left side in framebuffer coordinates.
+ * @fb_y: Clip rect top side in framebuffer coordinates.
+ * @unit_x1: Clip rect left side in crtc coordinates.
+ * @unit_y1: Clip rect top side in crtc coordinates.
+ * @unit_x2: Clip rect right side in crtc coordinates.
+ * @unit_y2: Clip rect bottom side in crtc coordinates.
+ *
+ * The clip rect coordinates are updated by the helper for each @clip call.
+ * Note that this structure may be subclassed (derived from) if more info
+ * needs to be passed between the helper caller and the helper callbacks.
+ */
+struct vmw_kms_dirty {
+       void (*fifo_commit)(struct vmw_kms_dirty *);
+       void (*clip)(struct vmw_kms_dirty *);
+       size_t fifo_reserve_size;
+       struct vmw_private *dev_priv;
+       struct vmw_display_unit *unit;
+       void *cmd;
+       u32 num_hits;
+       s32 fb_x;
+       s32 fb_y;
+       s32 unit_x1;
+       s32 unit_y1;
+       s32 unit_x2;
+       s32 unit_y2;
+};
 
 #define VMWGFX_NUM_DISPLAY_UNITS 8
 
@@ -173,7 +216,31 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
 int vmw_du_connector_set_property(struct drm_connector *connector,
                                  struct drm_property *property,
                                  uint64_t val);
-
+int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+                        struct vmw_framebuffer *framebuffer,
+                        const struct drm_clip_rect *clips,
+                        const struct drm_vmw_rect *vclips,
+                        s32 dest_x, s32 dest_y,
+                        int num_clips,
+                        int increment,
+                        struct vmw_kms_dirty *dirty);
+
+int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
+                                 struct vmw_dma_buffer *buf,
+                                 bool interruptible,
+                                 bool validate_as_mob);
+void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf);
+void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
+                                 struct drm_file *file_priv,
+                                 struct vmw_dma_buffer *buf,
+                                 struct vmw_fence_obj **out_fence,
+                                 struct drm_vmw_fence_rep __user *
+                                 user_fence_rep);
+int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
+                                   bool interruptible);
+void vmw_kms_helper_resource_revert(struct vmw_resource *res);
+void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+                                   struct vmw_fence_obj **out_fence);
 
 /*
  * Legacy display unit functions - vmwgfx_ldu.c
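
For buffer-backed (non-surface) framebuffers, the buffer helpers declared
above are intended to bracket command submission directly. A minimal sketch,
assuming a hypothetical example_emit_blits() helper that builds and submits
the fifo commands:

static int example_dmabuf_dirty(struct vmw_private *dev_priv,
                                struct drm_file *file_priv,
                                struct vmw_dma_buffer *buf,
                                struct drm_vmw_fence_rep __user *user_fence_rep)
{
        int ret;

        /* Reserve and validate the buffer (as a GMR rather than a MOB). */
        ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
        if (ret)
                return ret;

        ret = example_emit_blits(dev_priv, buf);        /* hypothetical */
        if (ret) {
                vmw_kms_helper_buffer_revert(buf);
                return ret;
        }

        /*
         * Fence the buffer and unreserve it. @file_priv and @user_fence_rep
         * must either both be NULL or both be non-NULL.
         */
        vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
                                     user_fence_rep);
        return 0;
}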
index 271bc900d83a01ac5f931b42ca8d54bccb23287e..521f1947b4e9de86823511bc5ab868664ece8fd2 100644 (file)
@@ -1259,7 +1259,8 @@ out_no_reserve:
  * the buffer may not be bound to the resource at this point.
  *
  */
-int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
+int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
+                        bool no_backup)
 {
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
@@ -1270,7 +1271,7 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
 
        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
-               ret = vmw_resource_buf_alloc(res, true);
+               ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }
@@ -1584,14 +1585,14 @@ void vmw_resource_evict_all(struct vmw_private *dev_priv)
  * its id will never change as long as there is a pin reference.
  * This function returns 0 on success and a negative error code on failure.
  */
-int vmw_resource_pin(struct vmw_resource *res)
+int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 {
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
 
-       ttm_write_lock(&dev_priv->reservation_sem, false);
+       ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        mutex_lock(&dev_priv->cmdbuf_mutex);
-       ret = vmw_resource_reserve(res, false);
+       ret = vmw_resource_reserve(res, interruptible, false);
        if (ret)
                goto out_no_reserve;
 
@@ -1601,12 +1602,13 @@ int vmw_resource_pin(struct vmw_resource *res)
                if (res->backup) {
                        vbo = res->backup;
 
-                       ttm_bo_reserve(&vbo->base, false, false, false, NULL);
+                       ttm_bo_reserve(&vbo->base, interruptible, false, false,
+                                      NULL);
                        if (!vbo->pin_count) {
                                ret = ttm_bo_validate
                                        (&vbo->base,
                                         res->func->backup_placement,
-                                        false, false);
+                                        interruptible, false);
                                if (ret) {
                                        ttm_bo_unreserve(&vbo->base);
                                        goto out_no_validate;
@@ -1649,7 +1651,7 @@ void vmw_resource_unpin(struct vmw_resource *res)
        ttm_read_lock(&dev_priv->reservation_sem, false);
        mutex_lock(&dev_priv->cmdbuf_mutex);
 
-       ret = vmw_resource_reserve(res, true);
+       ret = vmw_resource_reserve(res, false, true);
        WARN_ON(ret);
 
        WARN_ON(res->pin_count == 0);
index ef99df7463f3cffa038331999d4b510e4d68c7ed..becf9650c22844aed12768f635532057f9ed0eff 100644 (file)
@@ -91,7 +91,7 @@ static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu);
  */
 static int vmw_stdu_pin_display(struct vmw_screen_target_display_unit *stdu)
 {
-       return vmw_resource_pin(&stdu->display_srf->res);
+       return vmw_resource_pin(&stdu->display_srf->res, false);
 }