/* Imported from git.proxmox.com mirror_ubuntu-zesty-kernel.git:
 * ubuntu/vbox/vboxvideo/vbox_main.c
 * (UBUNTU: ubuntu: vbox -- Update to 5.1.16-dfsg-1) */
1 /* $Id: vbox_main.c $ */
2 /** @file
3 * VirtualBox Additions Linux kernel video driver
4 */
5
6 /*
7 * Copyright (C) 2013-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 * --------------------------------------------------------------------
17 *
18 * This code is based on
19 * ast_main.c
20 * with the following copyright and permission notice:
21 *
22 * Copyright 2012 Red Hat Inc.
23 *
24 * Permission is hereby granted, free of charge, to any person obtaining a
25 * copy of this software and associated documentation files (the
26 * "Software"), to deal in the Software without restriction, including
27 * without limitation the rights to use, copy, modify, merge, publish,
28 * distribute, sub license, and/or sell copies of the Software, and to
29 * permit persons to whom the Software is furnished to do so, subject to
30 * the following conditions:
31 *
32 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
34 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
35 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
36 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
37 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
38 * USE OR OTHER DEALINGS IN THE SOFTWARE.
39 *
40 * The above copyright notice and this permission notice (including the
41 * next paragraph) shall be included in all copies or substantial portions
42 * of the Software.
43 *
44 */
45 /*
46 * Authors: Dave Airlie <airlied@redhat.com>
47 */
48 #include "vbox_drv.h"
49
50 #include <VBox/VBoxVideoGuest.h>
51 #include <VBox/VBoxVideo.h>
52
53 #include <drm/drm_fb_helper.h>
54 #include <drm/drm_crtc_helper.h>
55
56 static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
57 {
58 struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
59 if (vbox_fb->obj)
60 drm_gem_object_unreference_unlocked(vbox_fb->obj);
61
62 LogFunc(("vboxvideo: %d: vbox_fb=%p, vbox_fb->obj=%p\n", __LINE__,
63 vbox_fb, vbox_fb->obj));
64 drm_framebuffer_cleanup(fb);
65 kfree(fb);
66 }
67
68 void vbox_enable_accel(struct vbox_private *vbox)
69 {
70 unsigned i;
71 struct VBVABUFFER *vbva;
72 uint32_t vram_map_offset = vbox->available_vram_size - vbox->vram_map_start;
73
74 AssertLogRelReturnVoid(vbox->vbva_info != NULL);
75 for (i = 0; i < vbox->num_crtcs; ++i) {
76 if (vbox->vbva_info[i].pVBVA == NULL) {
77 LogFunc(("vboxvideo: enabling VBVA.\n"));
78 vbva = (struct VBVABUFFER *) ( ((uint8_t *)vbox->mapped_vram)
79 + vram_map_offset
80 + i * VBVA_MIN_BUFFER_SIZE);
81 if (!VBoxVBVAEnable(&vbox->vbva_info[i], &vbox->submit_info, vbva, i))
82 AssertReleaseMsgFailed(("VBoxVBVAEnable failed - heap allocation error, very old host or driver error.\n"));
83 }
84 }
85 }
86
87 void vbox_disable_accel(struct vbox_private *vbox)
88 {
89 unsigned i;
90
91 for (i = 0; i < vbox->num_crtcs; ++i)
92 VBoxVBVADisable(&vbox->vbva_info[i], &vbox->submit_info, i);
93 }
94
95 void vbox_report_caps(struct vbox_private *vbox)
96 {
97 uint32_t caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION
98 | VBVACAPS_IRQ
99 | VBVACAPS_USE_VBVA_ONLY;
100 if (vbox->initial_mode_queried)
101 caps |= VBVACAPS_VIDEO_MODE_HINTS;
102 VBoxHGSMISendCapsInfo(&vbox->submit_info, caps);
103 }
104
105 /** Send information about dirty rectangles to VBVA. If necessary we enable
106 * VBVA first, as this is normally disabled after a change of master in case
107 * the new master does not send dirty rectangle information (is this even
108 * allowed?) */
void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
                                       struct drm_clip_rect *rects,
                                       unsigned num_rects)
{
        struct vbox_private *vbox = fb->dev->dev_private;
        struct drm_crtc *crtc;
        unsigned i;

        LogFunc(("vboxvideo: %d: fb=%p, num_rects=%u, vbox=%p\n", __LINE__, fb,
                 num_rects, vbox));
        mutex_lock(&vbox->hw_mutex);
        /* Report the rectangles on every CRTC currently scanning out this
         * framebuffer. */
        list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
                if (CRTC_FB(crtc) == fb) {
                        /* VBVA may have been disabled after a change of
                         * master; re-enable it lazily here (see the function
                         * comment above). */
                        vbox_enable_accel(vbox);
                        for (i = 0; i < num_rects; ++i)
                        {
                                unsigned crtc_id = to_vbox_crtc(crtc)->crtc_id;
                                VBVACMDHDR cmd_hdr;

                                /* Skip rectangles which do not intersect this
                                 * CRTC's visible area at all. */
                                if (   rects[i].x1 > crtc->x
                                                   + crtc->hwmode.hdisplay
                                    || rects[i].y1 > crtc->y
                                                   + crtc->hwmode.vdisplay
                                    || rects[i].x2 < crtc->x
                                    || rects[i].y2 < crtc->y)
                                        continue;
                                /* NOTE(review): x/y are sent in framebuffer
                                 * coordinates, not relative to the CRTC
                                 * origin — confirm the host expects that. */
                                cmd_hdr.x = (int16_t)rects[i].x1;
                                cmd_hdr.y = (int16_t)rects[i].y1;
                                cmd_hdr.w = (uint16_t)rects[i].x2 - rects[i].x1;
                                cmd_hdr.h = (uint16_t)rects[i].y2 - rects[i].y1;
                                if (VBoxVBVABufferBeginUpdate(&vbox->vbva_info[crtc_id],
                                                              &vbox->submit_info))
                                {
                                        VBoxVBVAWrite(&vbox->vbva_info[crtc_id], &vbox->submit_info, &cmd_hdr,
                                                      sizeof(cmd_hdr));
                                        VBoxVBVABufferEndUpdate(&vbox->vbva_info[crtc_id]);
                                }
                        }
                }
        }
        mutex_unlock(&vbox->hw_mutex);
        LogFunc(("vboxvideo: %d\n", __LINE__));
}
152
/** drm_framebuffer_funcs::dirty handler: forward the dirty rectangles from
 * user space to the host via VBVA.  @a file_priv, @a flags and @a color are
 * unused.  Always reports success. */
static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
                                       struct drm_file *file_priv,
                                       unsigned flags, unsigned color,
                                       struct drm_clip_rect *rects,
                                       unsigned num_rects)
{
        vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);
        return 0;
}
162
/** Framebuffer operation table handed to drm_framebuffer_init(). */
static const struct drm_framebuffer_funcs vbox_fb_funcs = {
        .destroy = vbox_user_framebuffer_destroy,
        .dirty = vbox_user_framebuffer_dirty,
};
167
168
/** Fill in and register a vbox_framebuffer.
 * @param dev       the DRM device
 * @param vbox_fb   caller-allocated framebuffer structure to initialise
 * @param mode_cmd  mode description from user space or the fbdev helper
 * @param obj       GEM object providing the backing storage; the reference
 *                  is taken over by the framebuffer (dropped by
 *                  vbox_user_framebuffer_destroy())
 * @returns 0 on success, negative errno from drm_framebuffer_init() on error
 */
int vbox_framebuffer_init(struct drm_device *dev,
                          struct vbox_framebuffer *vbox_fb,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
                          const
#endif
                          struct DRM_MODE_FB_CMD *mode_cmd,
                          struct drm_gem_object *obj)
{
        int ret;

        LogFunc(("vboxvideo: %d: dev=%p, vbox_fb=%p, obj=%p\n", __LINE__, dev,
                 vbox_fb, obj));
        /* The helper gained a dev argument in kernel 4.11. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
        drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
#else
        drm_helper_mode_fill_fb_struct(&vbox_fb->base, mode_cmd);
#endif
        vbox_fb->obj = obj;
        ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
        if (ret) {
                DRM_ERROR("framebuffer init failed %d\n", ret);
                LogFunc(("vboxvideo: %d\n", __LINE__));
                return ret;
        }
        LogFunc(("vboxvideo: %d\n", __LINE__));
        return 0;
}
196
/** drm_mode_config_funcs::fb_create implementation: look up the GEM object
 * named in @a mode_cmd and wrap it in a newly allocated vbox_framebuffer.
 * @returns the new framebuffer, or an ERR_PTR: -ENOENT for an unknown
 *          handle, -ENOMEM, or the error from vbox_framebuffer_init(). */
static struct drm_framebuffer *
vbox_user_framebuffer_create(struct drm_device *dev,
                             struct drm_file *filp,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
                             const
#endif
                             struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct drm_gem_object *obj;
        struct vbox_framebuffer *vbox_fb;
        int ret;

        LogFunc(("vboxvideo: %d\n", __LINE__));
        /* drm_gem_object_lookup() lost its dev argument in kernel 4.7. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
        obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
#else
        obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
#endif
        if (obj == NULL)
                return ERR_PTR(-ENOENT);

        vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
        if (!vbox_fb) {
                /* Drop the reference taken by the lookup above. */
                drm_gem_object_unreference_unlocked(obj);
                return ERR_PTR(-ENOMEM);
        }

        ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
        if (ret) {
                drm_gem_object_unreference_unlocked(obj);
                kfree(vbox_fb);
                return ERR_PTR(ret);
        }
        LogFunc(("vboxvideo: %d\n", __LINE__));
        return &vbox_fb->base;
}
233
/** Mode configuration operations: we only hook user framebuffer creation. */
static const struct drm_mode_config_funcs vbox_mode_funcs = {
        .fb_create = vbox_user_framebuffer_create,
};
237
238 static void vbox_accel_fini(struct vbox_private *vbox)
239 {
240 if (vbox->vbva_info)
241 {
242 vbox_disable_accel(vbox);
243 kfree(vbox->vbva_info);
244 vbox->vbva_info = NULL;
245 }
246 }
247
248 static int vbox_accel_init(struct vbox_private *vbox)
249 {
250 unsigned i;
251 LogFunc(("vboxvideo: %d: vbox=%p, vbox->num_crtcs=%u, vbox->vbva_info=%p\n",
252 __LINE__, vbox, (unsigned)vbox->num_crtcs, vbox->vbva_info));
253 if (!vbox->vbva_info)
254 {
255 vbox->vbva_info = kzalloc( sizeof(struct VBVABUFFERCONTEXT)
256 * vbox->num_crtcs,
257 GFP_KERNEL);
258 if (!vbox->vbva_info)
259 return -ENOMEM;
260 }
261 /* Take a command buffer for each screen from the end of usable VRAM. */
262 vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;
263 for (i = 0; i < vbox->num_crtcs; ++i)
264 VBoxVBVASetupBufferContext(&vbox->vbva_info[i],
265 vbox->available_vram_size + i * VBVA_MIN_BUFFER_SIZE,
266 VBVA_MIN_BUFFER_SIZE);
267 LogFunc(("vboxvideo: %d: vbox->vbva_info=%p, vbox->available_vram_size=%u\n",
268 __LINE__, vbox->vbva_info, (unsigned)vbox->available_vram_size));
269 return 0;
270 }
271
272 /** Allocation function for the HGSMI heap and data. */
static DECLCALLBACK(void *) alloc_hgsmi_environ(void *environ, HGSMISIZE size)
{
        NOREF(environ); /* no per-environment state is needed */
        /* GFP_KERNEL may sleep — assumes process context; TODO confirm all
         * HGSMI callers. */
        return kmalloc(size, GFP_KERNEL);
}
278
279
280 /** Free function for the HGSMI heap and data. */
static DECLCALLBACK(void) free_hgsmi_environ(void *environ, void *ptr)
{
        NOREF(environ); /* no per-environment state is needed */
        kfree(ptr);
}
286
287
288 /** Pointers to the HGSMI heap and data manipulation functions. */
static HGSMIENV hgsmi_environ =
{
        NULL,                /* opaque context pointer, unused by our callbacks */
        alloc_hgsmi_environ, /* allocation callback */
        free_hgsmi_environ   /* free callback */
};
295
296
297 /** Do we support the 4.3 plus mode hint reporting interface? */
298 static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
299 {
300 uint32_t have_hints, have_cursor;
301
302 return RT_SUCCESS(VBoxQueryConfHGSMI(&vbox->submit_info, VBOX_VBVA_CONF32_MODE_HINT_REPORTING, &have_hints))
303 && RT_SUCCESS(VBoxQueryConfHGSMI(&vbox->submit_info, VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING, &have_cursor))
304 && have_hints == VINF_SUCCESS
305 && have_cursor == VINF_SUCCESS;
306 }
307
/* pci_iomap_range() was only added in kernel 4.0; fall back to ioremapping
 * the requested window of the BAR directly.  NOTE(review): unlike the real
 * helper, this macro evaluates its arguments more than once and does no
 * bounds checking against the BAR length. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
# define pci_iomap_range(dev, bar, offset, maxlen) \
  ioremap(pci_resource_start(dev, bar) + offset, maxlen)
#endif
312
313 /** Set up our heaps and data exchange buffers in VRAM before handing the rest
314 * to the memory manager. */
static int vbox_hw_init(struct vbox_private *vbox)
{
        uint32_t base_offset, map_start, guest_heap_offset, guest_heap_size, host_flags_offset;
        void *guest_heap;

        vbox->full_vram_size = VBoxVideoGetVRAMSize();
        vbox->any_pitch = VBoxVideoAnyWidthAllowed();
        DRM_INFO("VRAM %08x\n", vbox->full_vram_size);
        VBoxHGSMIGetBaseMappingInfo(vbox->full_vram_size, &base_offset, NULL,
                                    &guest_heap_offset, &guest_heap_size, &host_flags_offset);
        /* Map from just below the guest heap (leaving room for one VBVA
         * command buffer per possible screen) up to the end of VRAM. */
        map_start = (uint32_t)max((int)base_offset
                                  - VBOX_MAX_SCREENS * VBVA_MIN_BUFFER_SIZE, 0);
        vbox->mapped_vram = pci_iomap_range(vbox->dev->pdev, 0, map_start,
                                            vbox->full_vram_size - map_start);
        if (!vbox->mapped_vram)
                return -ENOMEM;
        /* NOTE(review): the error paths below do not unmap locally; the
         * caller unwinds via vbox_driver_unload(), which does. */
        vbox->vram_map_start = map_start;
        /* All offsets below are relative to the start of VRAM, so subtract
         * map_start to get addresses inside our (partial) mapping. */
        guest_heap = ((uint8_t *)vbox->mapped_vram) + base_offset - map_start
                     + guest_heap_offset;
        vbox->host_flags_offset = base_offset - map_start + host_flags_offset;
        if (RT_FAILURE(VBoxHGSMISetupGuestContext(&vbox->submit_info, guest_heap,
                                                  guest_heap_size,
                                                  base_offset + guest_heap_offset,
                                                  &hgsmi_environ)))
                return -ENOMEM;
        /* Reduce available VRAM size to reflect the guest heap. */
        vbox->available_vram_size = base_offset;
        /* Linux drm represents monitors as a 32-bit array. */
        vbox->num_crtcs = min(VBoxHGSMIGetMonitorCount(&vbox->submit_info),
                              (uint32_t)VBOX_MAX_SCREENS);
        /* We require the mode hint interface (see have_hgsmi_mode_hints()). */
        if (!have_hgsmi_mode_hints(vbox))
                return -ENOTSUPP;
        vbox->last_mode_hints = kzalloc(sizeof(VBVAMODEHINT) * vbox->num_crtcs, GFP_KERNEL);
        if (!vbox->last_mode_hints)
                return -ENOMEM;
        return vbox_accel_init(vbox);
}
352
353 static void vbox_hw_fini(struct vbox_private *vbox)
354 {
355 vbox_accel_fini(vbox);
356 if (vbox->last_mode_hints)
357 kfree(vbox->last_mode_hints);
358 vbox->last_mode_hints = NULL;
359 }
360
/** Driver load-time initialisation: hardware, memory manager, mode
 * configuration, interrupts and fbdev emulation, in that order.
 * @param dev    the DRM device being loaded
 * @param flags  driver flags from the DRM core (unused here)
 * @returns 0 on success, negative errno on failure
 */
int vbox_driver_load(struct drm_device *dev, unsigned long flags)
{
        struct vbox_private *vbox;
        int ret = 0;

        LogFunc(("vboxvideo: %d: dev=%p\n", __LINE__, dev));
        if (!VBoxHGSMIIsSupported())
                return -ENODEV;
        vbox = kzalloc(sizeof(struct vbox_private), GFP_KERNEL);
        if (!vbox)
                return -ENOMEM;

        dev->dev_private = vbox;
        vbox->dev = dev;

        mutex_init(&vbox->hw_mutex);

        ret = vbox_hw_init(vbox);
        if (ret)
                goto out_free;

        ret = vbox_mm_init(vbox);
        if (ret)
                goto out_free;

        drm_mode_config_init(dev);

        dev->mode_config.funcs = (void *)&vbox_mode_funcs;
        dev->mode_config.min_width = 64;
        dev->mode_config.min_height = 64;
        dev->mode_config.preferred_depth = 24;
        dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
        dev->mode_config.max_height = VBE_DISPI_MAX_YRES;

        ret = vbox_mode_init(dev);
        if (ret)
                goto out_free;

        ret = vbox_irq_init(vbox);
        if (ret)
                goto out_free;

        ret = vbox_fbdev_init(dev);
        if (ret)
                goto out_free;
        LogFunc(("vboxvideo: %d: vbox=%p, vbox->mapped_vram=%p, vbox->full_vram_size=%u\n",
                 __LINE__, vbox, vbox->mapped_vram, (unsigned)vbox->full_vram_size));
        return 0;
out_free:
        /* Single unwind path: vbox_driver_unload() tolerates partial setup. */
        vbox_driver_unload(dev);
        LogFunc(("vboxvideo: %d: ret=%d\n", __LINE__, ret));
        return ret;
}
414
/** Driver teardown; also used by vbox_driver_load() to unwind a failed
 * load, so every step must cope with state that was never initialised.
 * The DRM core dropped the int return type in kernel 4.11. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
void vbox_driver_unload(struct drm_device *dev)
#else
int vbox_driver_unload(struct drm_device *dev)
#endif
{
        struct vbox_private *vbox = dev->dev_private;

        LogFunc(("vboxvideo: %d\n", __LINE__));
        vbox_fbdev_fini(dev);
        vbox_irq_fini(vbox);
        vbox_mode_fini(dev);
        /* mode_config.funcs doubles as the "mode config was set up" flag. */
        if (dev->mode_config.funcs)
                drm_mode_config_cleanup(dev);

        vbox_hw_fini(vbox);
        vbox_mm_fini(vbox);
        if (vbox->mapped_vram)
                pci_iounmap(dev->pdev, vbox->mapped_vram);
        kfree(vbox);
        dev->dev_private = NULL;
        LogFunc(("vboxvideo: %d\n", __LINE__));
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
        return 0;
#endif
}
441
442 /** @note this is described in the DRM framework documentation. AST does not
443 * have it, but we get an oops on driver unload if it is not present. */
void vbox_driver_lastclose(struct drm_device *dev)
{
        struct vbox_private *vbox = dev->dev_private;

        /* Hand the display back to the fbdev console when the last DRM
         * client goes away.  The unlocked helper appeared in kernel 3.16;
         * older kernels need the modeset locks taken explicitly. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
        if (vbox->fbdev)
                drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
#else
        drm_modeset_lock_all(dev);
        if (vbox->fbdev)
                drm_fb_helper_restore_fbdev_mode(&vbox->fbdev->helper);
        drm_modeset_unlock_all(dev);
#endif
}
458
459 int vbox_gem_create(struct drm_device *dev,
460 u32 size, bool iskernel,
461 struct drm_gem_object **obj)
462 {
463 struct vbox_bo *vboxbo;
464 int ret;
465
466 LogFunc(("vboxvideo: %d: dev=%p, size=%u, iskernel=%u\n", __LINE__,
467 dev, (unsigned)size, (unsigned)iskernel));
468 *obj = NULL;
469
470 size = roundup(size, PAGE_SIZE);
471 if (size == 0)
472 return -EINVAL;
473
474 ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
475 if (ret) {
476 if (ret != -ERESTARTSYS)
477 DRM_ERROR("failed to allocate GEM object\n");
478 return ret;
479 }
480 *obj = &vboxbo->gem;
481 LogFunc(("vboxvideo: %d: obj=%p\n", __LINE__, obj));
482 return 0;
483 }
484
485 int vbox_dumb_create(struct drm_file *file,
486 struct drm_device *dev,
487 struct drm_mode_create_dumb *args)
488 {
489 int ret;
490 struct drm_gem_object *gobj;
491 u32 handle;
492
493 LogFunc(("vboxvideo: %d: args->width=%u, args->height=%u, args->bpp=%u\n",
494 __LINE__, (unsigned)args->width, (unsigned)args->height,
495 (unsigned)args->bpp));
496 args->pitch = args->width * ((args->bpp + 7) / 8);
497 args->size = args->pitch * args->height;
498
499 ret = vbox_gem_create(dev, args->size, false,
500 &gobj);
501 if (ret)
502 return ret;
503
504 ret = drm_gem_handle_create(file, gobj, &handle);
505 drm_gem_object_unreference_unlocked(gobj);
506 if (ret)
507 return ret;
508
509 args->handle = handle;
510 LogFunc(("vboxvideo: %d: args->handle=%u\n", __LINE__,
511 (unsigned)args->handle));
512 return 0;
513 }
514
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
/** drm_driver::dumb_destroy implementation, only needed on kernels before
 * 3.12 (see the version guard): simply deletes the GEM handle. */
int vbox_dumb_destroy(struct drm_file *file,
                      struct drm_device *dev,
                      uint32_t handle)
{
        LogFunc(("vboxvideo: %d: dev=%p, handle=%u\n", __LINE__, dev,
                 (unsigned)handle));
        return drm_gem_handle_delete(file, handle);
}
#endif
525
526 static void vbox_bo_unref(struct vbox_bo **bo)
527 {
528 struct ttm_buffer_object *tbo;
529
530 if ((*bo) == NULL)
531 return;
532
533 LogFunc(("vboxvideo: %d: bo=%p\n", __LINE__, bo));
534 tbo = &((*bo)->bo);
535 ttm_bo_unref(&tbo);
536 if (tbo == NULL)
537 *bo = NULL;
538
539 }
/** GEM free callback: drop the reference held on the backing vbox_bo. */
void vbox_gem_free_object(struct drm_gem_object *obj)
{
        struct vbox_bo *bo = gem_to_vbox_bo(obj);

        LogFunc(("vboxvideo: %d: vbox_bo=%p\n", __LINE__, bo));
        vbox_bo_unref(&bo);
}
547
548
/** Return the fake mmap offset user space must pass to mmap() to map this
 * buffer object.  The field moved into drm_vma_node in kernel 3.12. */
static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
        return bo->bo.addr_space_offset;
#else
        return drm_vma_node_offset_addr(&bo->bo.vma_node);
#endif
}
/** drm_driver::dumb_map_offset implementation: look up the GEM object for
 * @a handle and return the mmap offset of its backing buffer object via
 * @a offset.
 * @returns 0 on success, -ENOENT for an unknown handle
 */
int
vbox_dumb_mmap_offset(struct drm_file *file,
                      struct drm_device *dev,
                      uint32_t handle,
                      uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret;
        struct vbox_bo *bo;

        LogFunc(("vboxvideo: %d: dev=%p, handle=%u\n", __LINE__,
                 dev, (unsigned)handle));
        /* NOTE(review): dev->struct_mutex is held around the lookup; newer
         * kernels discourage using it for this — confirm it is still wanted. */
        mutex_lock(&dev->struct_mutex);
        /* drm_gem_object_lookup() lost its dev argument in kernel 4.7. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
        obj = drm_gem_object_lookup(file, handle);
#else
        obj = drm_gem_object_lookup(dev, file, handle);
#endif
        if (obj == NULL) {
                ret = -ENOENT;
                goto out_unlock;
        }

        bo = gem_to_vbox_bo(obj);
        *offset = vbox_bo_mmap_offset(bo);

        /* Drop the reference taken by the lookup; we only needed the offset. */
        drm_gem_object_unreference(obj);
        ret = 0;
        LogFunc(("vboxvideo: %d: bo=%p, *offset=%llu\n", __LINE__,
                 bo, (unsigned long long)*offset));
out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;

}