/*
 * Copyright (C) 2013-2017 Oracle Corporation
 * This file is based on ast_main.c
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Dave Airlie <airlied@redhat.com>,
 *          Michael Thayer <michael.thayer@oracle.com>,
 *          Hans de Goede <hdegoede@redhat.com>
 */
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"
38 static void vbox_user_framebuffer_destroy(struct drm_framebuffer
*fb
)
40 struct vbox_framebuffer
*vbox_fb
= to_vbox_framebuffer(fb
);
43 drm_gem_object_unreference_unlocked(vbox_fb
->obj
);
45 drm_framebuffer_cleanup(fb
);
49 void vbox_enable_accel(struct vbox_private
*vbox
)
52 struct vbva_buffer
*vbva
;
54 if (!vbox
->vbva_info
|| !vbox
->vbva_buffers
) {
55 /* Should never happen... */
56 DRM_ERROR("vboxvideo: failed to set up VBVA.\n");
60 for (i
= 0; i
< vbox
->num_crtcs
; ++i
) {
61 if (vbox
->vbva_info
[i
].vbva
)
64 vbva
= (void *)vbox
->vbva_buffers
+ i
* VBVA_MIN_BUFFER_SIZE
;
65 if (!vbva_enable(&vbox
->vbva_info
[i
],
66 vbox
->guest_pool
, vbva
, i
)) {
67 /* very old host or driver error. */
68 DRM_ERROR("vboxvideo: vbva_enable failed\n");
74 void vbox_disable_accel(struct vbox_private
*vbox
)
78 for (i
= 0; i
< vbox
->num_crtcs
; ++i
)
79 vbva_disable(&vbox
->vbva_info
[i
], vbox
->guest_pool
, i
);
82 void vbox_report_caps(struct vbox_private
*vbox
)
84 u32 caps
= VBVACAPS_DISABLE_CURSOR_INTEGRATION
|
85 VBVACAPS_IRQ
| VBVACAPS_USE_VBVA_ONLY
;
87 if (vbox
->initial_mode_queried
)
88 caps
|= VBVACAPS_VIDEO_MODE_HINTS
;
90 hgsmi_send_caps_info(vbox
->guest_pool
, caps
);
94 * Send information about dirty rectangles to VBVA. If necessary we enable
95 * VBVA first, as this is normally disabled after a change of master in case
96 * the new master does not send dirty rectangle information (is this even
99 void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer
*fb
,
100 struct drm_clip_rect
*rects
,
101 unsigned int num_rects
)
103 struct vbox_private
*vbox
= fb
->dev
->dev_private
;
104 struct drm_crtc
*crtc
;
107 mutex_lock(&vbox
->hw_mutex
);
108 list_for_each_entry(crtc
, &fb
->dev
->mode_config
.crtc_list
, head
) {
109 if (CRTC_FB(crtc
) != fb
)
112 vbox_enable_accel(vbox
);
114 for (i
= 0; i
< num_rects
; ++i
) {
115 struct vbva_cmd_hdr cmd_hdr
;
116 unsigned int crtc_id
= to_vbox_crtc(crtc
)->crtc_id
;
118 if ((rects
[i
].x1
> crtc
->x
+ crtc
->hwmode
.hdisplay
) ||
119 (rects
[i
].y1
> crtc
->y
+ crtc
->hwmode
.vdisplay
) ||
120 (rects
[i
].x2
< crtc
->x
) ||
121 (rects
[i
].y2
< crtc
->y
))
124 cmd_hdr
.x
= (s16
)rects
[i
].x1
;
125 cmd_hdr
.y
= (s16
)rects
[i
].y1
;
126 cmd_hdr
.w
= (u16
)rects
[i
].x2
- rects
[i
].x1
;
127 cmd_hdr
.h
= (u16
)rects
[i
].y2
- rects
[i
].y1
;
129 if (!vbva_buffer_begin_update(&vbox
->vbva_info
[crtc_id
],
133 vbva_write(&vbox
->vbva_info
[crtc_id
], vbox
->guest_pool
,
134 &cmd_hdr
, sizeof(cmd_hdr
));
135 vbva_buffer_end_update(&vbox
->vbva_info
[crtc_id
]);
138 mutex_unlock(&vbox
->hw_mutex
);
/*
 * .dirty callback for vbox_fb_funcs: forward user-space dirty rectangles
 * to the VBVA reporting path.  flags/color are unused here.
 */
static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
				       struct drm_file *file_priv,
				       unsigned int flags, unsigned int color,
				       struct drm_clip_rect *rects,
				       unsigned int num_rects)
{
	vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);

	return 0;
}
152 static const struct drm_framebuffer_funcs vbox_fb_funcs
= {
153 .destroy
= vbox_user_framebuffer_destroy
,
154 .dirty
= vbox_user_framebuffer_dirty
,
157 int vbox_framebuffer_init(struct drm_device
*dev
,
158 struct vbox_framebuffer
*vbox_fb
,
159 const struct DRM_MODE_FB_CMD
*mode_cmd
,
160 struct drm_gem_object
*obj
)
164 drm_helper_mode_fill_fb_struct(dev
, &vbox_fb
->base
, mode_cmd
);
166 ret
= drm_framebuffer_init(dev
, &vbox_fb
->base
, &vbox_fb_funcs
);
168 DRM_ERROR("framebuffer init failed %d\n", ret
);
175 static struct drm_framebuffer
*vbox_user_framebuffer_create(
176 struct drm_device
*dev
,
177 struct drm_file
*filp
,
178 const struct drm_mode_fb_cmd2
*mode_cmd
)
180 struct drm_gem_object
*obj
;
181 struct vbox_framebuffer
*vbox_fb
;
184 obj
= drm_gem_object_lookup(filp
, mode_cmd
->handles
[0]);
186 return ERR_PTR(-ENOENT
);
188 vbox_fb
= kzalloc(sizeof(*vbox_fb
), GFP_KERNEL
);
192 ret
= vbox_framebuffer_init(dev
, vbox_fb
, mode_cmd
, obj
);
194 goto err_free_vbox_fb
;
196 return &vbox_fb
->base
;
201 drm_gem_object_unreference_unlocked(obj
);
205 static const struct drm_mode_config_funcs vbox_mode_funcs
= {
206 .fb_create
= vbox_user_framebuffer_create
,
209 static int vbox_accel_init(struct vbox_private
*vbox
)
213 vbox
->vbva_info
= devm_kcalloc(vbox
->dev
->dev
, vbox
->num_crtcs
,
214 sizeof(*vbox
->vbva_info
), GFP_KERNEL
);
215 if (!vbox
->vbva_info
)
218 /* Take a command buffer for each screen from the end of usable VRAM. */
219 vbox
->available_vram_size
-= vbox
->num_crtcs
* VBVA_MIN_BUFFER_SIZE
;
221 vbox
->vbva_buffers
= pci_iomap_range(vbox
->dev
->pdev
, 0,
222 vbox
->available_vram_size
,
224 VBVA_MIN_BUFFER_SIZE
);
225 if (!vbox
->vbva_buffers
)
228 for (i
= 0; i
< vbox
->num_crtcs
; ++i
)
229 vbva_setup_buffer_context(&vbox
->vbva_info
[i
],
230 vbox
->available_vram_size
+
231 i
* VBVA_MIN_BUFFER_SIZE
,
232 VBVA_MIN_BUFFER_SIZE
);
237 static void vbox_accel_fini(struct vbox_private
*vbox
)
239 vbox_disable_accel(vbox
);
240 pci_iounmap(vbox
->dev
->pdev
, vbox
->vbva_buffers
);
243 /** Do we support the 4.3 plus mode hint reporting interface? */
244 static bool have_hgsmi_mode_hints(struct vbox_private
*vbox
)
246 u32 have_hints
, have_cursor
;
249 ret
= hgsmi_query_conf(vbox
->guest_pool
,
250 VBOX_VBVA_CONF32_MODE_HINT_REPORTING
,
255 ret
= hgsmi_query_conf(vbox
->guest_pool
,
256 VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING
,
261 return have_hints
== VINF_SUCCESS
&& have_cursor
== VINF_SUCCESS
;
264 static bool vbox_check_supported(u16 id
)
268 vbox_write_ioport(VBE_DISPI_INDEX_ID
, id
);
269 dispi_id
= inw(VBE_DISPI_IOPORT_DATA
);
271 return dispi_id
== id
;
275 * Set up our heaps and data exchange buffers in VRAM before handing the rest
276 * to the memory manager.
278 static int vbox_hw_init(struct vbox_private
*vbox
)
282 vbox
->full_vram_size
= inl(VBE_DISPI_IOPORT_DATA
);
283 vbox
->any_pitch
= vbox_check_supported(VBE_DISPI_ID_ANYX
);
285 DRM_INFO("VRAM %08x\n", vbox
->full_vram_size
);
287 /* Map guest-heap at end of vram */
289 pci_iomap_range(vbox
->dev
->pdev
, 0, GUEST_HEAP_OFFSET(vbox
),
291 if (!vbox
->guest_heap
)
294 /* Create guest-heap mem-pool use 2^4 = 16 byte chunks */
295 vbox
->guest_pool
= gen_pool_create(4, -1);
296 if (!vbox
->guest_pool
)
297 goto err_unmap_guest_heap
;
299 ret
= gen_pool_add_virt(vbox
->guest_pool
,
300 (unsigned long)vbox
->guest_heap
,
301 GUEST_HEAP_OFFSET(vbox
),
302 GUEST_HEAP_USABLE_SIZE
, -1);
304 goto err_destroy_guest_pool
;
306 ret
= hgsmi_test_query_conf(vbox
->guest_pool
);
308 DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n");
309 goto err_destroy_guest_pool
;
312 /* Reduce available VRAM size to reflect the guest heap. */
313 vbox
->available_vram_size
= GUEST_HEAP_OFFSET(vbox
);
314 /* Linux drm represents monitors as a 32-bit array. */
315 hgsmi_query_conf(vbox
->guest_pool
, VBOX_VBVA_CONF32_MONITOR_COUNT
,
317 vbox
->num_crtcs
= clamp_t(u32
, vbox
->num_crtcs
, 1, VBOX_MAX_SCREENS
);
319 if (!have_hgsmi_mode_hints(vbox
)) {
321 goto err_destroy_guest_pool
;
324 vbox
->last_mode_hints
= devm_kcalloc(vbox
->dev
->dev
, vbox
->num_crtcs
,
325 sizeof(struct vbva_modehint
),
327 if (!vbox
->last_mode_hints
) {
329 goto err_destroy_guest_pool
;
332 ret
= vbox_accel_init(vbox
);
334 goto err_destroy_guest_pool
;
338 err_destroy_guest_pool
:
339 gen_pool_destroy(vbox
->guest_pool
);
340 err_unmap_guest_heap
:
341 pci_iounmap(vbox
->dev
->pdev
, vbox
->guest_heap
);
345 static void vbox_hw_fini(struct vbox_private
*vbox
)
347 vbox_accel_fini(vbox
);
348 gen_pool_destroy(vbox
->guest_pool
);
349 pci_iounmap(vbox
->dev
->pdev
, vbox
->guest_heap
);
352 int vbox_driver_load(struct drm_device
*dev
, unsigned long flags
)
354 struct vbox_private
*vbox
;
357 if (!vbox_check_supported(VBE_DISPI_ID_HGSMI
))
360 vbox
= devm_kzalloc(dev
->dev
, sizeof(*vbox
), GFP_KERNEL
);
364 dev
->dev_private
= vbox
;
367 mutex_init(&vbox
->hw_mutex
);
369 ret
= vbox_hw_init(vbox
);
373 ret
= vbox_mm_init(vbox
);
377 drm_mode_config_init(dev
);
379 dev
->mode_config
.funcs
= (void *)&vbox_mode_funcs
;
380 dev
->mode_config
.min_width
= 64;
381 dev
->mode_config
.min_height
= 64;
382 dev
->mode_config
.preferred_depth
= 24;
383 dev
->mode_config
.max_width
= VBE_DISPI_MAX_XRES
;
384 dev
->mode_config
.max_height
= VBE_DISPI_MAX_YRES
;
386 ret
= vbox_mode_init(dev
);
388 goto err_drm_mode_cleanup
;
390 ret
= vbox_irq_init(vbox
);
394 ret
= vbox_fbdev_init(dev
);
404 err_drm_mode_cleanup
:
405 drm_mode_config_cleanup(dev
);
412 void vbox_driver_unload(struct drm_device
*dev
)
414 struct vbox_private
*vbox
= dev
->dev_private
;
416 vbox_fbdev_fini(dev
);
419 drm_mode_config_cleanup(dev
);
425 * @note this is described in the DRM framework documentation. AST does not
426 * have it, but we get an oops on driver unload if it is not present.
428 void vbox_driver_lastclose(struct drm_device
*dev
)
430 struct vbox_private
*vbox
= dev
->dev_private
;
433 drm_fb_helper_restore_fbdev_mode_unlocked(&vbox
->fbdev
->helper
);
436 int vbox_gem_create(struct drm_device
*dev
,
437 u32 size
, bool iskernel
, struct drm_gem_object
**obj
)
439 struct vbox_bo
*vboxbo
;
444 size
= roundup(size
, PAGE_SIZE
);
448 ret
= vbox_bo_create(dev
, size
, 0, 0, &vboxbo
);
450 if (ret
!= -ERESTARTSYS
)
451 DRM_ERROR("failed to allocate GEM object\n");
460 int vbox_dumb_create(struct drm_file
*file
,
461 struct drm_device
*dev
, struct drm_mode_create_dumb
*args
)
464 struct drm_gem_object
*gobj
;
467 args
->pitch
= args
->width
* ((args
->bpp
+ 7) / 8);
468 args
->size
= args
->pitch
* args
->height
;
470 ret
= vbox_gem_create(dev
, args
->size
, false, &gobj
);
474 ret
= drm_gem_handle_create(file
, gobj
, &handle
);
475 drm_gem_object_unreference_unlocked(gobj
);
479 args
->handle
= handle
;
484 static void vbox_bo_unref(struct vbox_bo
**bo
)
486 struct ttm_buffer_object
*tbo
;
/* GEM free callback: release the vbox_bo backing the GEM object. */
void vbox_gem_free_object(struct drm_gem_object *obj)
{
	struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);

	vbox_bo_unref(&vbox_bo);
}
504 static inline u64
vbox_bo_mmap_offset(struct vbox_bo
*bo
)
506 return drm_vma_node_offset_addr(&bo
->bo
.vma_node
);
510 vbox_dumb_mmap_offset(struct drm_file
*file
,
511 struct drm_device
*dev
,
512 u32 handle
, u64
*offset
)
514 struct drm_gem_object
*obj
;
518 mutex_lock(&dev
->struct_mutex
);
519 obj
= drm_gem_object_lookup(file
, handle
);
525 bo
= gem_to_vbox_bo(obj
);
526 *offset
= vbox_bo_mmap_offset(bo
);
528 drm_gem_object_unreference(obj
);
532 mutex_unlock(&dev
->struct_mutex
);