-Source: http://ports.ubuntu.com/pool/multiverse/v/virtualbox/virtualbox-guest-dkms_5.1.6-dfsg-1_all.deb
-Version: 5.1.6-dfsg-1
+Source: http://ports.ubuntu.com/pool/multiverse/v/virtualbox/virtualbox-guest-dkms_5.1.10-dfsg-2_all.deb
+Version: 5.1.10-dfsg-2
PACKAGE_NAME="virtualbox-guest"
-PACKAGE_VERSION="5.1.6"
+PACKAGE_VERSION="5.1.10"
CLEAN="rm -f *.*o"
BUILT_MODULE_NAME[0]="vboxguest"
BUILT_MODULE_LOCATION[0]="vboxguest"
/** The virtual monitor has been blanked by the guest and should be blacked
* out by the host. */
#define VBVA_SCREEN_F_BLANK 0x0004
+/** The virtual monitor has been blanked by the guest and should be blacked
+ * out by the host using the previous mode values for width, height, etc. */
+#define VBVA_SCREEN_F_BLANK2 0x0008
typedef struct VBVAINFOSCREEN
{
#define VERR_PGM_INVALID_LARGE_PAGE_RANGE (-1645)
/** Don't mess around with ballooned pages. */
#define VERR_PGM_PHYS_PAGE_BALLOONED (-1646)
+/** Internal processing error \#1 in page access handler code. */
+#define VERR_PGM_HANDLER_IPE_1 (-1647)
/** pgmPhysPageMapCommon encountered PGMPAGETYPE_MMIO2_ALIAS_MMIO. */
#define VERR_PGM_PCI_PASSTHRU_MISCONFIG (-1682)
/** Too many MMIO2 ranges. */
#define VERR_PGM_TOO_MANY_MMIO2_RANGES (-1683)
-/** Internal processing error in the PGM physial page mapping code dealing
+/** Internal processing error in the PGM physical page mapping code dealing
* with MMIO2 pages. */
#define VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE (-1684)
+/** Internal processing error in the PGM physical page handling code related to
+ * MMIO/MMIO2. */
+#define VERR_PGM_PHYS_MMIO_EX_IPE (-1685)
/** @} */
/** The maximum number of pages that can be allocated and mapped
* by various MM, PGM and SUP APIs. */
-#define VBOX_MAX_ALLOC_PAGE_COUNT (256U * _1M / PAGE_SIZE)
+#if ARCH_BITS == 64
+# define VBOX_MAX_ALLOC_PAGE_COUNT (_512M / PAGE_SIZE)
+#else
+# define VBOX_MAX_ALLOC_PAGE_COUNT (_256M / PAGE_SIZE)
+#endif
/** @def VBOX_WITH_PAGE_SHARING
* Enables the page sharing code.
#endif
/** The default size of the below 4GB RAM hole. */
#define MM_RAM_HOLE_SIZE_DEFAULT (512U * _1M)
+/** The maximum 64-bit MMIO BAR size.
+ * @remarks There isn't really any limit here other than the size of the
+ * tracking structures we need (around 1/256 of the size). */
+#if HC_ARCH_BITS == 64
+# define MM_MMIO_64_MAX _1T
+#else
+# define MM_MMIO_64_MAX (_1G64 * 16)
+#endif
+/** The maximum 32-bit MMIO BAR size. */
+#define MM_MMIO_32_MAX _2G
+
/** @} */
/** RC pointer to a PDM Device Instance. */
typedef RCPTRTYPE(PPDMDEVINS) PPDMDEVINSRC;
+/** Pointer to a PDM PCI device structure. */
+typedef struct PDMPCIDEV *PPDMPCIDEV;
+
/** Pointer to a PDM USB Device Instance. */
typedef struct PDMUSBINS *PPDMUSBINS;
/** Pointer to a pointer to a PDM USB Device Instance. */
/** Process does not have the increase quota (IQ) privilege needed for
* creating a process as a given user. IQ is also called 'Increase quotas'. */
#define VERR_PROC_IQ_PRIV_NOT_HELD (-22413)
+/** The system has too many CPUs. */
+#define VERR_MP_TOO_MANY_CPUS (-22414)
/** @} */
# define RTMpGetCount RT_MANGLER(RTMpGetCount)
# define RTMpGetCurFrequency RT_MANGLER(RTMpGetCurFrequency)
# define RTMpGetDescription RT_MANGLER(RTMpGetDescription)
+# define RTMpGetCpuGroupCounts RT_MANGLER(RTMpGetCpuGroupCounts)
+# define RTMpGetMaxCpuGroupCount RT_MANGLER(RTMpGetMaxCpuGroupCount)
# define RTMpGetMaxCpuId RT_MANGLER(RTMpGetMaxCpuId)
# define RTMpGetMaxFrequency RT_MANGLER(RTMpGetMaxFrequency)
# define RTMpGetOnlineCount RT_MANGLER(RTMpGetOnlineCount)
# define RTMpOnPairIsConcurrentExecSupported RT_MANGLER(RTMpOnPairIsConcurrentExecSupported) /* r0drv */
# define RTMpOnSpecific RT_MANGLER(RTMpOnSpecific) /* r0drv */
# define RTMpPokeCpu RT_MANGLER(RTMpPokeCpu) /* r0drv */
+# define RTMpSetIndexFromCpuGroupMember RT_MANGLER(RTMpSetIndexFromCpuGroupMember)
# define RTMsgError RT_MANGLER(RTMsgError)
# define RTMsgErrorExit RT_MANGLER(RTMsgErrorExit)
# define RTMsgErrorExitV RT_MANGLER(RTMsgErrorExitV)
# define RTTimeNanoTSLegacyAsyncUseApicId_EndProc RT_MANGLER(RTTimeNanoTSLegacyAsyncUseApicId_EndProc)
# define RTTimeNanoTSLegacyAsyncUseRdtscp RT_MANGLER(RTTimeNanoTSLegacyAsyncUseRdtscp)
# define RTTimeNanoTSLegacyAsyncUseRdtscp_EndProc RT_MANGLER(RTTimeNanoTSLegacyAsyncUseRdtscp_EndProc)
+# define RTTimeNanoTSLegacyAsyncUseRdtscpGroupChNumCl RT_MANGLER(RTTimeNanoTSLegacyAsyncUseRdtscpGroupChNumCl)
+# define RTTimeNanoTSLegacyAsyncUseRdtscpGroupChNumCl_EndProc RT_MANGLER(RTTimeNanoTSLegacyAsyncUseRdtscpGroupChNumCl_EndProc)
# define RTTimeNanoTSLegacyAsyncUseIdtrLim RT_MANGLER(RTTimeNanoTSLegacyAsyncUseIdtrLim)
# define RTTimeNanoTSLegacyAsyncUseIdtrLim_EndProc RT_MANGLER(RTTimeNanoTSLegacyAsyncUseIdtrLim_EndProc)
# define RTTimeNanoTSLegacySyncInvarNoDelta RT_MANGLER(RTTimeNanoTSLegacySyncInvarNoDelta)
# define RTTimeNanoTSLFenceAsyncUseApicId_EndProc RT_MANGLER(RTTimeNanoTSLFenceAsyncUseApicId_EndProc)
# define RTTimeNanoTSLFenceAsyncUseRdtscp RT_MANGLER(RTTimeNanoTSLFenceAsyncUseRdtscp)
# define RTTimeNanoTSLFenceAsyncUseRdtscp_EndProc RT_MANGLER(RTTimeNanoTSLFenceAsyncUseRdtscp_EndProc)
+# define RTTimeNanoTSLFenceAsyncUseRdtscpGroupChNumCl RT_MANGLER(RTTimeNanoTSLFenceAsyncUseRdtscpGroupChNumCl)
+# define RTTimeNanoTSLFenceAsyncUseRdtscpGroupChNumCl_EndProc RT_MANGLER(RTTimeNanoTSLFenceAsyncUseRdtscpGroupChNumCl_EndProc)
# define RTTimeNanoTSLFenceAsyncUseIdtrLim RT_MANGLER(RTTimeNanoTSLFenceAsyncUseIdtrLim)
# define RTTimeNanoTSLFenceAsyncUseIdtrLim_EndProc RT_MANGLER(RTTimeNanoTSLFenceAsyncUseIdtrLim_EndProc)
# define RTTimeNanoTSLFenceSyncInvarNoDelta RT_MANGLER(RTTimeNanoTSLFenceSyncInvarNoDelta)
*/
RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu);
+/**
+ * Translates an NT process group member to a CPU set index.
+ *
+ * @returns CPU set index, -1 if not valid.
+ * @param idxGroup The CPU group.
+ * @param idxMember The CPU group member number.
+ *
+ * @remarks Only available on Windows.
+ */
+RTDECL(int) RTMpSetIndexFromCpuGroupMember(uint32_t idxGroup, uint32_t idxMember);
+
+/**
+ * Gets the member numbers for a CPU group.
+ *
+ * @returns Maximum number of group members.
+ * @param idxGroup The CPU group.
+ * @param pcActive Where to return the number of active members.
+ *
+ * @remarks Only available on Windows.
+ */
+RTDECL(uint32_t) RTMpGetCpuGroupCounts(uint32_t idxGroup, uint32_t *pcActive);
+
+/**
+ * Get the maximum number of CPU groups.
+ *
+ * @returns Maximum number of CPU groups.
+ *
+ * @remarks Only available on Windows.
+ */
+RTDECL(uint32_t) RTMpGetMaxCpuGroupCount(void);
+
/**
* Gets the max CPU identifier (inclusive).
*
#ifdef IN_RING3
RTDECL(uint64_t) RTTimeNanoTSLegacyAsyncUseApicId(PRTTIMENANOTSDATA pData);
RTDECL(uint64_t) RTTimeNanoTSLegacyAsyncUseRdtscp(PRTTIMENANOTSDATA pData);
+RTDECL(uint64_t) RTTimeNanoTSLegacyAsyncUseRdtscpGroupChNumCl(PRTTIMENANOTSDATA pData);
RTDECL(uint64_t) RTTimeNanoTSLegacyAsyncUseIdtrLim(PRTTIMENANOTSDATA pData);
RTDECL(uint64_t) RTTimeNanoTSLegacySyncInvarWithDeltaUseApicId(PRTTIMENANOTSDATA pData);
RTDECL(uint64_t) RTTimeNanoTSLegacySyncInvarWithDeltaUseRdtscp(PRTTIMENANOTSDATA pData);
RTDECL(uint64_t) RTTimeNanoTSLegacySyncInvarWithDeltaUseIdtrLim(PRTTIMENANOTSDATA pData);
RTDECL(uint64_t) RTTimeNanoTSLFenceAsyncUseApicId(PRTTIMENANOTSDATA pData);
RTDECL(uint64_t) RTTimeNanoTSLFenceAsyncUseRdtscp(PRTTIMENANOTSDATA pData);
+RTDECL(uint64_t) RTTimeNanoTSLFenceAsyncUseRdtscpGroupChNumCl(PRTTIMENANOTSDATA pData);
RTDECL(uint64_t) RTTimeNanoTSLFenceAsyncUseIdtrLim(PRTTIMENANOTSDATA pData);
RTDECL(uint64_t) RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicId(PRTTIMENANOTSDATA pData);
RTDECL(uint64_t) RTTimeNanoTSLFenceSyncInvarWithDeltaUseRdtscp(PRTTIMENANOTSDATA pData);
if (R0Process == RTR0ProcHandleSelf())
rc = get_user_pages(R3Ptr, /* Where from. */
cPages, /* How many pages. */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+ fWrite ? FOLL_WRITE | /* Write to memory. */
+ FOLL_FORCE /* force write access. */
+ : 0, /* Write to memory. */
+# else
fWrite, /* Write to memory. */
fWrite, /* force write access. */
+# endif
&pMemLnx->apPages[0], /* Page array. */
papVMAs); /* vmas */
/*
pTask->mm, /* Whose pages. */
R3Ptr, /* Where from. */
cPages, /* How many pages. */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+ fWrite ? FOLL_WRITE | /* Write to memory. */
+ FOLL_FORCE /* force write access. */
+ : 0, /* Write to memory. */
+# else
fWrite, /* Write to memory. */
fWrite, /* force write access. */
+# endif
&pMemLnx->apPages[0], /* Page array. */
papVMAs); /* vmas */
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) */
pTask->mm, /* Whose pages. */
R3Ptr, /* Where from. */
cPages, /* How many pages. */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+ fWrite ? FOLL_WRITE | /* Write to memory. */
+ FOLL_FORCE /* force write access. */
+ : 0, /* Write to memory. */
+# else
fWrite, /* Write to memory. */
fWrite, /* force write access. */
+# endif
&pMemLnx->apPages[0], /* Page array. */
papVMAs); /* vmas */
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) */
#include <iprt/thread.h>
#include "r0drv/mp-r0drv.h"
+#ifdef nr_cpumask_bits
+# define VBOX_NR_CPUMASK_BITS nr_cpumask_bits
+#else
+# define VBOX_NR_CPUMASK_BITS NR_CPUS
+#endif
RTDECL(RTCPUID) RTMpCpuId(void)
{
RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
{
- return idCpu < RTCPUSET_MAX_CPUS && idCpu < NR_CPUS ? (int)idCpu : -1;
+ return idCpu < RTCPUSET_MAX_CPUS && idCpu < VBOX_NR_CPUMASK_BITS ? (int)idCpu : -1;
}
RT_EXPORT_SYMBOL(RTMpCpuIdToSetIndex);
RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
{
- return iCpu < NR_CPUS ? (RTCPUID)iCpu : NIL_RTCPUID;
+ return iCpu < VBOX_NR_CPUMASK_BITS ? (RTCPUID)iCpu : NIL_RTCPUID;
}
RT_EXPORT_SYMBOL(RTMpCpuIdFromSetIndex);
RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
{
- return NR_CPUS - 1; //???
+ return VBOX_NR_CPUMASK_BITS - 1; //???
}
RT_EXPORT_SYMBOL(RTMpGetMaxCpuId);
RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
{
#if defined(CONFIG_SMP)
- if (RT_UNLIKELY(idCpu >= NR_CPUS))
+ if (RT_UNLIKELY(idCpu >= VBOX_NR_CPUMASK_BITS))
return false;
# if defined(cpu_possible)
RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
{
#ifdef CONFIG_SMP
- if (RT_UNLIKELY(idCpu >= NR_CPUS))
+ if (RT_UNLIKELY(idCpu >= VBOX_NR_CPUMASK_BITS))
return false;
# ifdef cpu_online
return cpu_online(idCpu);
*
* All entries in the wake-up list gets signalled and moved to the woken-up
* list.
+ * At least on Windows this function can be invoked concurrently from
+ * different VCPUs. So, be thread-safe.
*
* @param pDevExt The device extension.
*/
PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
if (!pWait)
break;
+ /* Prevent other threads from accessing pWait when spinlock is released. */
+ RTListNodeRemove(&pWait->ListNode);
+
pWait->fPendingWakeUp = true;
RTSpinlockRelease(pDevExt->EventSpinlock);
AssertRC(rc);
RTSpinlockAcquire(pDevExt->EventSpinlock);
+ Assert(pWait->ListNode.pNext == NULL && pWait->ListNode.pPrev == NULL);
+ RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
pWait->fPendingWakeUp = false;
- if (!pWait->fFreeMe)
- {
- RTListNodeRemove(&pWait->ListNode);
- RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
- }
+ if (RT_LIKELY(!pWait->fFreeMe))
+ { /* likely */ }
else
{
pWait->fFreeMe = false;
{
LogFlow(("VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK: pfnNotify=%p pvUser=%p\n", pNotify->pfnNotify, pNotify->pvUser));
+#ifdef VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT
+ VGDrvNativeSetMouseNotifyCallback(pDevExt, pNotify);
+#else
RTSpinlockAcquire(pDevExt->EventSpinlock);
pDevExt->MouseNotifyCallback = *pNotify;
RTSpinlockRelease(pDevExt->EventSpinlock);
+#endif
return VINF_SUCCESS;
}
#endif
}
+/**
+ * Simply checks whether the IRQ is ours or not, does not do any interrupt
+ * processing.
+ *
+ * @returns true if it was our interrupt, false if it wasn't.
+ * @param pDevExt The VBoxGuest device extension.
+ */
+bool VGDrvCommonIsOurIRQ(PVBOXGUESTDEVEXT pDevExt)
+{
+ RTSpinlockAcquire(pDevExt->EventSpinlock);
+ bool const fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
+ RTSpinlockRelease(pDevExt->EventSpinlock);
+
+ return fOurIrq;
+}
+
+
/**
* Common interrupt service routine.
*
{
fMousePositionChanged = true;
fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
-#ifndef RT_OS_WINDOWS
+#if !defined(RT_OS_WINDOWS) && !defined(VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT)
if (pDevExt->MouseNotifyCallback.pfnNotify)
pDevExt->MouseNotifyCallback.pfnNotify(pDevExt->MouseNotifyCallback.pvUser);
#endif
RTSpinlockRelease(pDevExt->EventSpinlock);
+ /*
+ * Execute the mouse notification callback here if it cannot be executed while
+ * holding the interrupt safe spinlock, see @bugref{8639}.
+ */
+#if defined(VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT)
+ if ( fMousePositionChanged
+ && pDevExt->MouseNotifyCallback.pfnNotify)
+ pDevExt->MouseNotifyCallback.pfnNotify(pDevExt->MouseNotifyCallback.pvUser);
+#endif
+
#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_DARWIN) && !defined(RT_OS_WINDOWS)
/*
* Do wake-ups.
# define VBOXGUEST_USE_DEFERRED_WAKE_UP
#endif
+/** @def VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT
+ * The mouse notification callback can cause preemption and must not be invoked
+ * while holding a high-level spinlock.
+ */
+#if defined(RT_OS_SOLARIS) || defined(DOXYGEN_RUNNING)
+# define VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT
+#endif
/** Pointer to the VBoxGuest per session data. */
typedef struct VBOXGUESTSESSION *PVBOXGUESTSESSION;
int VGDrvCommonInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase, void *pvMMIOBase, uint32_t cbMMIO,
VBOXOSTYPE enmOSType, uint32_t fEvents);
+bool VGDrvCommonIsOurIRQ(PVBOXGUESTDEVEXT pDevExt);
bool VGDrvCommonISR(PVBOXGUESTDEVEXT pDevExt);
void VGDrvCommonDeleteDevExt(PVBOXGUESTDEVEXT pDevExt);
int VGDrvCommonReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType);
int VGDrvNtIOCtl_DpcLatencyChecker(void);
#endif
+#ifdef VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT
+int VGDrvNativeSetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, VBoxGuestMouseSetNotifyCallback *pNotify);
+#endif
+
RT_C_DECLS_END
#endif
-#define VBOX_SVN_REV 110634
+#define VBOX_SVN_REV 112026
#define VBOX_VERSION_MAJOR 5
#define VBOX_VERSION_MINOR 1
-#define VBOX_VERSION_BUILD 6
-#define VBOX_VERSION_STRING_RAW "5.1.6"
-#define VBOX_VERSION_STRING "5.1.6_Ubuntu"
+#define VBOX_VERSION_BUILD 10
+#define VBOX_VERSION_STRING_RAW "5.1.10"
+#define VBOX_VERSION_STRING "5.1.10_Ubuntu"
#define VBOX_API_VERSION_STRING "5_1"
#define VBOX_PRIVATE_BUILD_DESC "Private build by root"
* @param old_dentry old directory cache entry
* @param new_parent inode of the new parent directory
* @param new_dentry new directory cache entry
+ * @param flags flags
* @returns 0 on success, Linux error code otherwise
*/
static int sf_rename(struct inode *old_parent, struct dentry *old_dentry,
- struct inode *new_parent, struct dentry *new_dentry)
+ struct inode *new_parent, struct dentry *new_dentry
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+ , unsigned flags
+#endif
+ )
{
int err = 0, rc = VINF_SUCCESS;
struct sf_glob_info *sf_g = GET_GLOB_INFO(old_parent->i_sb);
TRACE();
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+ if (flags)
+ {
+ LogFunc(("rename with flags=%x\n", flags));
+ return -EINVAL;
+ }
+#endif
+
if (sf_g != GET_GLOB_INFO(new_parent->i_sb))
{
LogFunc(("rename with different roots\n"));
-#define VBOX_SVN_REV 110634
+#define VBOX_SVN_REV 112026
#define VBOX_VERSION_MAJOR 5
#define VBOX_VERSION_MINOR 1
-#define VBOX_VERSION_BUILD 6
-#define VBOX_VERSION_STRING_RAW "5.1.6"
-#define VBOX_VERSION_STRING "5.1.6_Ubuntu"
+#define VBOX_VERSION_BUILD 10
+#define VBOX_VERSION_STRING_RAW "5.1.10"
+#define VBOX_VERSION_STRING "5.1.10_Ubuntu"
#define VBOX_API_VERSION_STRING "5_1"
#define VBOX_PRIVATE_BUILD_DESC "Private build by root"
-#define VBOX_SVN_REV 110634
+#define VBOX_SVN_REV 112026
* do not advertise dynamic modes on the first query and send a
* tentative hotplug notification after that to see if they query again. */
vbox->initial_mode_queried = false;
+ mutex_lock(&vbox->hw_mutex);
+ /* Disable VBVA when someone releases master in case the next person tries
+ * to do VESA. */
+ /** @todo work out if anyone is likely to and whether it will even work. */
+ /* Update: we also disable it because if the new master does not do dirty
+ * rectangle reporting (e.g. old versions of Plymouth) then at least the
+ * first screen will still be updated. We enable it as soon as we
+ * receive a dirty rectangle report. */
+ vbox_disable_accel(vbox);
+ mutex_unlock(&vbox->hw_mutex);
return 0;
}
#endif
{
struct vbox_private *vbox = dev->dev_private;
+ /* See vbox_master_set() */
vbox->initial_mode_queried = false;
mutex_lock(&vbox->hw_mutex);
- /* Disable VBVA when someone releases master in case the next person tries
- * to do VESA. */
- /** @todo work out if anyone is likely to and whether it will even work. */
- /* Update: we also disable it because if the new master does not do dirty
- * rectangle reporting (e.g. old versions of Plymouth) then at least the
- * first screen will still be updated. We enable it as soon as we
- * receive a dirty rectangle report. */
vbox_disable_accel(vbox);
mutex_unlock(&vbox->hw_mutex);
}
static int __init vbox_init(void)
{
- unsigned i;
-
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force() && vbox_modeset == -1)
return -EINVAL;
if (vbox_modeset == 0)
return -EINVAL;
- /* Do not load if any of the virtual consoles is in graphics mode to be
- * sure that we do not pick a fight with a user-mode driver or VESA. */
- for (i = 0; i < MAX_NR_CONSOLES - 1; ++i)
- if (vc_cons[i].d && vc_cons[i].d->vc_mode == KD_GRAPHICS)
- return -EINVAL;
-
return drm_pci_init(&driver, &vbox_pci_driver);
}
static void __exit vbox_exit(void)
void vbox_enable_accel(struct vbox_private *vbox);
void vbox_disable_accel(struct vbox_private *vbox);
-void vbox_enable_caps(struct vbox_private *vbox);
+void vbox_report_caps(struct vbox_private *vbox);
void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
struct drm_clip_rect *rects,
VBoxVBVADisable(&vbox->vbva_info[i], &vbox->submit_info, i);
}
-void vbox_enable_caps(struct vbox_private *vbox)
+void vbox_report_caps(struct vbox_private *vbox)
{
- uint32_t caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION
+ uint32_t caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION
| VBVACAPS_IRQ
| VBVACAPS_USE_VBVA_ONLY;
if (vbox->initial_mode_queried)
crtc->x * bpp / 8 + crtc->y * pitch,
pitch, width, height,
vbox_crtc->blanked ? 0 : bpp, flags);
- VBoxHGSMIReportFlagsLocation(&vbox->submit_info, vbox->vram_map_start
- + vbox->host_flags_offset);
LogFunc(("vboxvideo: %d\n", __LINE__));
}
LogFunc(("vboxvideo: %d: connector=%p\n", __LINE__, connector));
vbox_connector = to_vbox_connector(connector);
vbox = connector->dev->dev_private;
+ /* Heuristic: we do not want to tell the host that we support dynamic
+ * resizing unless we feel confident that the user space client using
+ * the video driver can handle hot-plug events. So the first time modes
+ * are queried after a "master" switch we tell the host that we do not,
+ * and immediately after we send the client a hot-plug notification as
+ * a test to see if they will respond and query again.
+ * That is also the reason why capabilities are reported to the host at
+ * this place in the code rather than elsewhere.
+ * We need to report the flags location before reporting the IRQ
+ * capability. */
+ VBoxHGSMIReportFlagsLocation(&vbox->submit_info, vbox->vram_map_start
+ + vbox->host_flags_offset);
if (vbox_connector->vbox_crtc->crtc_id == 0)
- vbox_enable_caps(vbox);
+ vbox_report_caps(vbox);
if (!vbox->initial_mode_queried) {
if (vbox_connector->vbox_crtc->crtc_id == 0) {
vbox->initial_mode_queried = true;
return ret;
if ( caps & VMMDEV_MOUSE_HOST_CANNOT_HWPOINTER
|| !(caps & VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE))
- return -EINVAL;
+ /* -EINVAL means cursor_set2() not supported, -EAGAIN means
+ * retry at once. */
+ return -EBUSY;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
obj = drm_gem_object_lookup(file_priv, handle);
int r;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
-#else
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
r = ttm_bo_move_memcpy(bo, evict, interruptible, no_wait_gpu, new_mem);
+#else
+ r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
#endif
return r;
}
#define VBOX_VERSION_MAJOR 5
#define VBOX_VERSION_MINOR 1
-#define VBOX_VERSION_BUILD 6
-#define VBOX_VERSION_STRING_RAW "5.1.6"
-#define VBOX_VERSION_STRING "5.1.6_Ubuntu"
+#define VBOX_VERSION_BUILD 10
+#define VBOX_VERSION_STRING_RAW "5.1.10"
+#define VBOX_VERSION_STRING "5.1.10_Ubuntu"
#define VBOX_API_VERSION_STRING "5_1"
#define VBOX_PRIVATE_BUILD_DESC "Private build by root"