/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/export.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"

#include <drm/ttm/ttm_placement.h>

#define VMW_DIRTY_DELAY (HZ / 30)
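
/*
 * struct vmw_fb_par - private state for the fbdev emulation layer. The
 * fbdev contents live in the vmalloc'd shadow buffer; damaged regions are
 * coalesced into the dirty rectangle under dirty.lock and copied into the
 * buffer object backing the KMS framebuffer by the local_work handler.
 */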

struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct mutex bo_mutex;
	struct vmw_dma_buffer *vmw_bo;
	unsigned bo_size;
	struct drm_framebuffer *set_fb;
	struct drm_display_mode *set_mode;
	u32 fb_x;
	u32 fb_y;

	u32 pseudo_palette[17];

	unsigned max_width;
	unsigned max_height;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;

	struct drm_crtc *crtc;
	struct drm_connector *con;
	struct delayed_work local_work;
};

static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->set_fb->format->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n",
			  par->set_fb->format->depth,
			  par->set_fb->format->cpp[0] * 8);
		return 1;
	}

	return 0;
}
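
/*
 * vmw_fb_check_var() below only accepts 32 bpp truecolor layouts; the
 * effective depth is 32 when an alpha channel is requested and 24
 * otherwise, and the requested geometry must fit both the fbdev
 * framebuffer and the available VRAM.
 */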

static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					var->xres * var->bits_per_pixel/8,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/**
 * vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer
 *
 * @work: The struct work_struct associated with this task.
 *
 * This function flushes the dirty regions of the vmalloc framebuffer to the
 * kms framebuffer, and if the kms framebuffer is visible, also updates the
 * corresponding displays. Note that this function runs even if the kms
 * framebuffer is not bound to a crtc and thus not visible, but it's turned
 * off during hibernation using the par->dirty.active bool.
 */
static void vmw_fb_dirty_flush(struct work_struct *work)
{
	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
					      local_work.work);
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	unsigned long irq_flags;
	s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
	u32 cpp, max_x, max_y;
	struct drm_clip_rect clip;
	struct drm_framebuffer *cur_fb;
	u8 *src_ptr, *dst_ptr;
	struct vmw_dma_buffer *vbo = par->vmw_bo;
	void *virtual;

	if (!READ_ONCE(par->dirty.active))
		return;

	mutex_lock(&par->bo_mutex);
	cur_fb = par->set_fb;
	if (!cur_fb)
		goto out_unlock;

	(void) ttm_read_lock(&vmw_priv->reservation_sem, false);
	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
	virtual = vmw_dma_buffer_map_and_cache(vbo);
	if (!virtual)
		goto out_unreserve;

	spin_lock_irqsave(&par->dirty.lock, irq_flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
		goto out_unreserve;
	}

	/*
	 * Handle panning when copying from vmalloc to framebuffer.
	 * Clip dirty area to framebuffer.
	 */
	cpp = cur_fb->format->cpp[0];
	max_x = par->fb_x + cur_fb->width;
	max_y = par->fb_y + cur_fb->height;

	dst_x1 = par->dirty.x1 - par->fb_x;
	dst_y1 = par->dirty.y1 - par->fb_y;
	dst_x1 = max_t(s32, dst_x1, 0);
	dst_y1 = max_t(s32, dst_y1, 0);

	dst_x2 = par->dirty.x2 - par->fb_x;
	dst_y2 = par->dirty.y2 - par->fb_y;
	dst_x2 = min_t(s32, dst_x2, max_x);
	dst_y2 = min_t(s32, dst_y2, max_y);
	w = dst_x2 - dst_x1;
	h = dst_y2 - dst_y1;
	w = max_t(s32, 0, w);
	h = max_t(s32, 0, h);

	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);

	if (w && h) {
		dst_ptr = (u8 *)virtual +
			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
		src_ptr = (u8 *)par->vmalloc +
			((dst_y1 + par->fb_y) * info->fix.line_length +
			 (dst_x1 + par->fb_x) * cpp);

		while (h-- > 0) {
			memcpy(dst_ptr, src_ptr, w*cpp);
			dst_ptr += par->set_fb->pitches[0];
			src_ptr += info->fix.line_length;
		}

		clip.x1 = dst_x1;
		clip.x2 = dst_x2;
		clip.y1 = dst_y1;
		clip.y2 = dst_y2;
	}

out_unreserve:
	ttm_bo_unreserve(&vbo->base);
	ttm_read_unlock(&vmw_priv->reservation_sem);
	if (w && h) {
		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
						       &clip, 1));
		vmw_fifo_flush(vmw_priv, false);
	}
out_unlock:
	mutex_unlock(&par->bo_mutex);
}
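
/*
 * vmw_fb_dirty_mark - extend the coalesced dirty rectangle to cover the
 * given region. If this is the first damage since the last flush and
 * flushing is active, the flush work is scheduled after VMW_DIRTY_DELAY.
 */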

static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* If we are active, start the dirty work;
		 * we share the work with the defio system. */
		if (par->dirty.active)
			schedule_delayed_work(&par->local_work,
					      VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}
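
/*
 * Panning is handled in software: vmw_fb_pan_display() records the new
 * offset and marks the whole visible framebuffer dirty so that the next
 * flush repaints it from the shadow buffer at the new position.
 */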

static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;

	if ((var->xoffset + var->xres) > var->xres_virtual ||
	    (var->yoffset + var->yres) > var->yres_virtual) {
		DRM_ERROR("Requested panning can not fit in framebuffer\n");
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;
	if (par->set_fb)
		vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
				  par->set_fb->height);
	mutex_unlock(&par->bo_mutex);

	return 0;
}
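
/*
 * vmw_deferred_io - fb_deferred_io callback. Pages written through the
 * userspace mmap are converted into a scanline range, merged into the
 * dirty rectangle, and the flush work is rescheduled to run immediately.
 */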

static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);

		/*
		 * Since we've already waited on this work once, try to
		 * execute asap.
		 */
		cancel_delayed_work(&par->local_work);
		schedule_delayed_work(&par->local_work, 0);
	}
}

static struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};
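
/*
 * fbdev drawing ops: draw into the shadow buffer with the generic cfb_*
 * helpers, then mark the touched rectangle dirty so it is flushed to the
 * real framebuffer.
 */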

static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}
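
/*
 * vmw_fb_create_bo - allocate the vmw_dma_buffer that backs the KMS
 * framebuffer, holding the reservation semaphore for the duration of the
 * allocation.
 */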

static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &vmw_sys_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;
	ttm_write_unlock(&vmw_priv->reservation_sem);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->reservation_sem);
	return ret;
}

static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
				int *depth)
{
	switch (var->bits_per_pixel) {
	case 32:
		*depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	return 0;
}
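
/*
 * vmwgfx_set_config_internal - kick off a legacy modeset from inside the
 * driver. It retries on -EDEADLK via drm_modeset_backoff() and keeps the
 * framebuffer refcounts of all crtcs balanced, much like the core does for
 * a userspace-initiated set_config.
 */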

static int vmwgfx_set_config_internal(struct drm_mode_set *set)
{
	struct drm_crtc *crtc = set->crtc;
	struct drm_framebuffer *fb;
	struct drm_crtc *tmp;
	struct drm_device *dev = set->crtc->dev;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

restart:
	/*
	 * NOTE: ->set_config can also disable other crtcs (if we steal all
	 * connectors from it), hence we need to refcount the fbs across all
	 * crtcs. Atomic modeset will have saner semantics ...
	 */
	drm_for_each_crtc(tmp, dev)
		tmp->primary->old_fb = tmp->primary->fb;

	fb = set->fb;

	ret = crtc->funcs->set_config(set, &ctx);
	if (ret == 0) {
		crtc->primary->crtc = crtc;
		crtc->primary->fb = fb;
	}

	drm_for_each_crtc(tmp, dev) {
		if (tmp->primary->fb)
			drm_framebuffer_get(tmp->primary->fb);
		if (tmp->primary->old_fb)
			drm_framebuffer_put(tmp->primary->old_fb);
		tmp->primary->old_fb = NULL;
	}

	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto restart;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
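
/*
 * vmw_fb_kms_detach - undo the current fbdev modeset: turn off the mode on
 * the fbdev crtc, release the KMS framebuffer and, when requested, drop the
 * reference on the backing buffer object as well.
 */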

static int vmw_fb_kms_detach(struct vmw_fb_par *par,
			     bool detach_bo,
			     bool unref_bo)
{
	struct drm_framebuffer *cur_fb = par->set_fb;
	int ret;

	/* Detach the KMS framebuffer from crtcs */
	if (par->set_mode) {
		struct drm_mode_set set;

		set.crtc = par->crtc;
		set.x = 0;
		set.y = 0;
		set.mode = NULL;
		set.fb = NULL;
		set.num_connectors = 0;
		set.connectors = &par->con;
		ret = vmwgfx_set_config_internal(&set);
		if (ret) {
			DRM_ERROR("Could not unset a mode.\n");
			return ret;
		}
		drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
		par->set_mode = NULL;
	}

	if (cur_fb) {
		drm_framebuffer_put(cur_fb);
		par->set_fb = NULL;
	}

	if (par->vmw_bo && detach_bo && unref_bo)
		vmw_dmabuf_unreference(&par->vmw_bo);

	return 0;
}
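
/*
 * vmw_fb_kms_framebuffer - make sure par->set_fb matches the current fbdev
 * mode. The existing framebuffer is kept when size, format and pitch still
 * match; otherwise it is detached, the backing buffer object is reallocated
 * if it is too small (or wastefully large), and a new framebuffer is created.
 */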

static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
	struct drm_mode_fb_cmd2 mode_cmd;
	struct vmw_fb_par *par = info->par;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_framebuffer *cur_fb;
	struct vmw_framebuffer *vfb;
	int ret = 0, depth;
	size_t new_bo_size;

	ret = vmw_fb_compute_depth(var, &depth);
	if (ret)
		return ret;

	mode_cmd.width = var->xres;
	mode_cmd.height = var->yres;
	mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
	mode_cmd.pixel_format =
		drm_mode_legacy_fb_format(var->bits_per_pixel, depth);

	cur_fb = par->set_fb;
	if (cur_fb && cur_fb->width == mode_cmd.width &&
	    cur_fb->height == mode_cmd.height &&
	    cur_fb->format->format == mode_cmd.pixel_format &&
	    cur_fb->pitches[0] == mode_cmd.pitches[0])
		return 0;

	/* Need a new buffer object? */
	new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
	ret = vmw_fb_kms_detach(par,
				par->bo_size < new_bo_size ||
				par->bo_size > 2*new_bo_size,
				true);
	if (ret)
		return ret;

	if (!par->vmw_bo) {
		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
				       &par->vmw_bo);
		if (ret) {
			DRM_ERROR("Failed creating a buffer object for "
				  "fbdev.\n");
			return ret;
		}
		par->bo_size = new_bo_size;
	}

	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
				      true, &mode_cmd);
	if (IS_ERR(vfb))
		return PTR_ERR(vfb);

	par->set_fb = &vfb->base;

	return 0;
}
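
/*
 * vmw_fb_set_par - apply the mode described by info->var: rebuild the KMS
 * framebuffer if necessary, construct a display mode of the requested size,
 * set it on the fbdev crtc/connector and schedule a full repaint.
 */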

static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct drm_mode_set set;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
		DRM_MODE_TYPE_DRIVER,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	struct drm_display_mode *old_mode;
	struct drm_display_mode *mode;
	int ret;

	old_mode = par->set_mode;
	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
	if (!mode) {
		DRM_ERROR("Could not create new fb mode.\n");
		return -ENOMEM;
	}

	mode->hdisplay = var->xres;
	mode->vdisplay = var->yres;
	vmw_guess_mode_timing(mode);

	if (old_mode && drm_mode_equal(old_mode, mode)) {
		drm_mode_destroy(vmw_priv->dev, mode);
		mode = old_mode;
		old_mode = NULL;
	} else if (!vmw_kms_validate_mode_vram(vmw_priv,
					       mode->hdisplay *
					       DIV_ROUND_UP(var->bits_per_pixel, 8),
					       mode->vdisplay)) {
		drm_mode_destroy(vmw_priv->dev, mode);
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	ret = vmw_fb_kms_framebuffer(info);
	if (ret)
		goto out_unlock;

	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;

	set.crtc = par->crtc;
	set.x = 0;
	set.y = 0;
	set.mode = mode;
	set.fb = par->set_fb;
	set.num_connectors = 1;
	set.connectors = &par->con;

	ret = vmwgfx_set_config_internal(&set);
	if (ret)
		goto out_unlock;

	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
			  par->set_fb->width, par->set_fb->height);

	/*
	 * If there already was stuff dirty we won't
	 * schedule a new work, so let's do it now.
	 */
	schedule_delayed_work(&par->local_work, 0);

out_unlock:
	if (old_mode)
		drm_mode_destroy(vmw_priv->dev, old_mode);
	par->set_mode = mode;

	mutex_unlock(&par->bo_mutex);

	return ret;
}

static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};
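
/*
 * vmw_fb_init - set up fbdev emulation for the device: allocate the fb_info
 * and the vmalloc shadow buffer, fill in the fixed and variable screen
 * information, wire up deferred I/O and register the framebuffer.
 */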

int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	struct drm_display_mode *init_mode;
	int ret;

	fb_bpp = 32;
	fb_depth = 24;

	/* XXX As shouldn't these be as well. */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	vmw_priv->fb_info = info;
	par = info->par;
	memset(par, 0, sizeof(*par));
	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
	par->vmw_priv = vmw_priv;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
				      par->max_height, &par->con,
				      &par->crtc, &init_mode);
	if (ret)
		goto err_free;

	info->var.xres = init_mode->hdisplay;
	info->var.yres = init_mode->vdisplay;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vzalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = (char __iomem *)par->vmalloc;
	info->screen_size = fb_size;

	info->fbops = &vmw_fb_ops;

	/* 24 depth per default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = fb_bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_free;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	mutex_init(&par->bo_mutex);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	vmw_fb_set_par(info);

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_free:
	vfree(par->vmalloc);
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}
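
/*
 * vmw_fb_close - tear down fbdev emulation: stop deferred I/O, cancel the
 * flush work, unregister the framebuffer, detach the KMS state and free
 * the shadow buffer.
 */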

int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;

	fb_deferred_io_cleanup(info);
	cancel_delayed_work_sync(&par->local_work);
	unregister_framebuffer(info);

	mutex_lock(&par->bo_mutex);
	(void) vmw_fb_kms_detach(par, true, true);
	mutex_unlock(&par->bo_mutex);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}
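
/*
 * vmw_fb_off - stop fbdev flushing (e.g. around suspend or hibernation):
 * clear par->dirty.active and wait for any pending deferred-I/O and flush
 * work to finish.
 */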

int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work(&info->deferred_work);
	flush_delayed_work(&par->local_work);

	return 0;
}
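
/*
 * vmw_fb_on - re-enable fbdev flushing after vmw_fb_off() and kick the
 * flush work once, since a dirty region accumulated while inactive would
 * otherwise never be rescheduled.
 */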

int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	/*
	 * Need to reschedule a dirty update, because otherwise that's
	 * only done in dirty_mark() if the previous coalesced
	 * dirty region was empty.
	 */
	schedule_delayed_work(&par->local_work, 0);

	return 0;
}