// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/console.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/mem_encrypt.h>

#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_placement.h>
#include <generated/utsrelease.h>

#include "ttm_object.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_devcaps.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_mksstat.h"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600
#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)

/*
 * Fully encoded drm commands. Might move to vmw_drm.h
 */
#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
		 struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \
		 union drm_vmw_gb_surface_create_ext_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \
		 union drm_vmw_gb_surface_reference_ext_arg)
#define DRM_IOCTL_VMW_MSG \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \
		 struct drm_vmw_msg_arg)
#define DRM_IOCTL_VMW_MKSSTAT_RESET \
	DRM_IO(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_RESET)
#define DRM_IOCTL_VMW_MKSSTAT_ADD \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_ADD, \
		 struct drm_vmw_mksstat_add_arg)
#define DRM_IOCTL_VMW_MKSSTAT_REMOVE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_REMOVE, \
		struct drm_vmw_mksstat_remove_arg)
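
/*
 * Usage sketch (illustrative): each macro above combines an ioctl
 * direction (DRM_IOW/DRM_IOR/DRM_IOWR), the driver-private command number
 * offset by DRM_COMMAND_BASE, and the argument type, so a userspace client
 * can issue, e.g.:
 *
 *	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *	ioctl(drm_fd, DRM_IOCTL_VMW_GET_PARAM, &arg);
 */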
static const struct drm_ioctl_desc vmw_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VMW_GET_PARAM, vmw_getparam_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CURSOR_BYPASS,
			  vmw_kms_cursor_bypass_ioctl,
			  DRM_MASTER),

	DRM_IOCTL_DEF_DRV(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
			  DRM_MASTER),
	DRM_IOCTL_DEF_DRV(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
			  DRM_MASTER),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
			  DRM_MASTER),

	DRM_IOCTL_DEF_DRV(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_EXECBUF, vmw_execbuf_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_SIGNALED,
			  vmw_fence_obj_signaled_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
			  DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only */
	DRM_IOCTL_DEF_DRV(VMW_PRESENT, vmw_present_ioctl,
			  DRM_MASTER | DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VMW_PRESENT_READBACK,
			  vmw_present_readback_ioctl,
			  DRM_MASTER | DRM_AUTH),
	/*
	 * The permissions of the below ioctl are overridden in
	 * vmw_generic_ioctl(). We require either
	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
	 */
	DRM_IOCTL_DEF_DRV(VMW_UPDATE_LAYOUT,
			  vmw_kms_update_layout_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CREATE_SHADER,
			  vmw_shader_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_SHADER,
			  vmw_shader_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE,
			  vmw_gb_surface_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF,
			  vmw_gb_surface_reference_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_SYNCCPU,
			  vmw_user_bo_synccpu_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CREATE_EXTENDED_CONTEXT,
			  vmw_extended_context_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE_EXT,
			  vmw_gb_surface_define_ext_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF_EXT,
			  vmw_gb_surface_reference_ext_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MSG,
			  vmw_msg_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_RESET,
			  vmw_mksstat_reset_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_ADD,
			  vmw_mksstat_add_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_REMOVE,
			  vmw_mksstat_remove_ioctl,
			  DRM_RENDER_ALLOW),
};
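
/*
 * Note on the flags above: DRM_RENDER_ALLOW permits the ioctl on render
 * nodes, DRM_MASTER restricts it to the current DRM master, and DRM_AUTH
 * additionally requires an authenticated client on the primary node.
 */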
static const struct pci_device_id vmw_pci_id_list[] = {
	{ PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) },
	{ PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA3) },
	{ }
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
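
/*
 * Example (illustrative): the parameters above can be given at load time,
 * e.g. "modprobe vmwgfx force_coherent=1", and, being mode 0600, can also
 * be changed at runtime through /sys/module/vmwgfx/parameters/<name>.
 */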
struct bitmap_name {
	uint32_t value;
	const char *name;
};

static const struct bitmap_name cap1_names[] = {
	{ SVGA_CAP_RECT_COPY, "rect copy" },
	{ SVGA_CAP_CURSOR, "cursor" },
	{ SVGA_CAP_CURSOR_BYPASS, "cursor bypass" },
	{ SVGA_CAP_CURSOR_BYPASS_2, "cursor bypass 2" },
	{ SVGA_CAP_8BIT_EMULATION, "8bit emulation" },
	{ SVGA_CAP_ALPHA_CURSOR, "alpha cursor" },
	{ SVGA_CAP_3D, "3D" },
	{ SVGA_CAP_EXTENDED_FIFO, "extended fifo" },
	{ SVGA_CAP_MULTIMON, "multimon" },
	{ SVGA_CAP_PITCHLOCK, "pitchlock" },
	{ SVGA_CAP_IRQMASK, "irq mask" },
	{ SVGA_CAP_DISPLAY_TOPOLOGY, "display topology" },
	{ SVGA_CAP_GMR, "gmr" },
	{ SVGA_CAP_TRACES, "traces" },
	{ SVGA_CAP_GMR2, "gmr2" },
	{ SVGA_CAP_SCREEN_OBJECT_2, "screen object 2" },
	{ SVGA_CAP_COMMAND_BUFFERS, "command buffers" },
	{ SVGA_CAP_CMD_BUFFERS_2, "command buffers 2" },
	{ SVGA_CAP_GBOBJECTS, "gbobject" },
	{ SVGA_CAP_DX, "dx" },
	{ SVGA_CAP_HP_CMD_QUEUE, "hp cmd queue" },
	{ SVGA_CAP_NO_BB_RESTRICTION, "no bb restriction" },
	{ SVGA_CAP_CAP2_REGISTER, "cap2 register" },
};
static const struct bitmap_name cap2_names[] = {
	{ SVGA_CAP2_GROW_OTABLE, "grow otable" },
	{ SVGA_CAP2_INTRA_SURFACE_COPY, "intra surface copy" },
	{ SVGA_CAP2_DX2, "dx2" },
	{ SVGA_CAP2_GB_MEMSIZE_2, "gb memsize 2" },
	{ SVGA_CAP2_SCREENDMA_REG, "screendma reg" },
	{ SVGA_CAP2_OTABLE_PTDEPTH_2, "otable ptdepth2" },
	{ SVGA_CAP2_NON_MS_TO_MS_STRETCHBLT, "non ms to ms stretchblt" },
	{ SVGA_CAP2_CURSOR_MOB, "cursor mob" },
	{ SVGA_CAP2_MSHINT, "mshint" },
	{ SVGA_CAP2_CB_MAX_SIZE_4MB, "cb max size 4mb" },
	{ SVGA_CAP2_DX3, "dx3" },
	{ SVGA_CAP2_FRAME_TYPE, "frame type" },
	{ SVGA_CAP2_COTABLE_COPY, "cotable copy" },
	{ SVGA_CAP2_TRACE_FULL_FB, "trace full fb" },
	{ SVGA_CAP2_EXTRA_REGS, "extra regs" },
	{ SVGA_CAP2_LO_STAGING, "lo staging" },
};
static void vmw_print_bitmap(struct drm_device *drm,
			     const char *prefix, uint32_t bitmap,
			     const struct bitmap_name *bnames,
			     uint32_t num_names)
{
	char buf[512];
	uint32_t offset = 0;
	uint32_t i;

	for (i = 0; i < num_names; ++i) {
		if ((bitmap & bnames[i].value) != 0) {
			offset += snprintf(buf + offset,
					   ARRAY_SIZE(buf) - offset,
					   "%s, ", bnames[i].name);
			bitmap &= ~bnames[i].value;
		}
	}

	drm_info(drm, "%s: %s\n", prefix, buf);
	if (bitmap != 0)
		drm_dbg(drm, "%s: unknown enums: %x\n", prefix, bitmap);
}
static void vmw_print_sm_type(struct vmw_private *dev_priv)
{
	static const char *names[] = {
		[VMW_SM_LEGACY] = "Legacy",
		[VMW_SM_4] = "SM4",
		[VMW_SM_4_1] = "SM4_1",
		[VMW_SM_5] = "SM_5",
		[VMW_SM_MAX] = "Invalid"
	};
	BUILD_BUG_ON(ARRAY_SIZE(names) != (VMW_SM_MAX + 1));
	drm_info(&dev_priv->drm, "Available shader model: %s.\n",
		 names[dev_priv->sm_type]);
}
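
/* Example log line emitted above: "Available shader model: SM4_1." */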
/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_buffer_object *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
			  &vmw_sys_placement, false, true,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_bo_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}
static int vmw_device_init(struct vmw_private *dev_priv)
{
	bool uses_fb_traces = false;

	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
		  SVGA_REG_ENABLE_HIDE);

	uses_fb_traces = !vmw_cmd_supported(dev_priv) &&
			 (dev_priv->capabilities & SVGA_CAP_TRACES) != 0;

	vmw_write(dev_priv, SVGA_REG_TRACES, uses_fb_traces);
	dev_priv->fifo = vmw_fifo_create(dev_priv);
	if (IS_ERR(dev_priv->fifo)) {
		int err = PTR_ERR(dev_priv->fifo);

		dev_priv->fifo = NULL;
		return err;
	} else if (!dev_priv->fifo) {
		vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	}

	dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	return 0;
}
static void vmw_device_fini(struct vmw_private *vmw)
{
	/*
	 * Legacy sync
	 */
	vmw_write(vmw, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
		;

	vmw->last_read_seqno = vmw_fence_read(vmw);

	vmw_write(vmw, SVGA_REG_CONFIG_DONE,
		  vmw->config_done_state);
	vmw_write(vmw, SVGA_REG_ENABLE,
		  vmw->enable_state);
	vmw_write(vmw, SVGA_REG_TRACES,
		  vmw->traces_state);

	vmw_fifo_destroy(vmw);
}
/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_device_init(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize the device.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->sm_type = VMW_SM_LEGACY;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_device_fini(dev_priv);
	return ret;
}
/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_bo_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
}
/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_device_fini(dev_priv);
}
/*
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}
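
/*
 * Worked example: a host reporting 640x480 is clamped up to the 800x600
 * minimum, and a bogus size above fb_max_[width|height] likewise falls
 * back to 800x600.
 */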
/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine what actions need to be taken by the
 * driver to make system pages visible to the device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Caching DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};

	/* TTM currently doesn't fully support SEV encryption. */
	if (mem_encrypt_active())
		return -EINVAL;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
	else if (vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;
	else
		dev_priv->map_mode = vmw_dma_map_populate;

	drm_info(&dev_priv->drm,
		 "DMA map mode: %s\n", names[dev_priv->map_mode]);
	return 0;
}
/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to struct vmw_private
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
	if (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask) {
		drm_info(&dev_priv->drm,
			 "Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
	}

	return ret;
}
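
/*
 * DMA_BIT_MASK(44) is (1ULL << 44) - 1, i.e. the restriction above limits
 * the device to the first 16 TiB of DMA address space.
 */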
static int vmw_vram_manager_init(struct vmw_private *dev_priv)
{
	int ret;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ret = vmw_thp_init(dev_priv);
#else
	ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
				 dev_priv->vram_size >> PAGE_SHIFT);
#endif
	ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
	return ret;
}

static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	vmw_thp_fini(dev_priv);
#else
	ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
#endif
}
static int vmw_setup_pci_resources(struct vmw_private *dev,
				   u32 pci_id)
{
	resource_size_t rmmio_start;
	resource_size_t rmmio_size;
	resource_size_t fifo_start;
	resource_size_t fifo_size;
	int ret;
	struct pci_dev *pdev = to_pci_dev(dev->drm.dev);

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, "vmwgfx probe");
	if (ret)
		return ret;

	dev->pci_id = pci_id;
	if (pci_id == VMWGFX_PCI_ID_SVGA3) {
		rmmio_start = pci_resource_start(pdev, 0);
		rmmio_size = pci_resource_len(pdev, 0);
		dev->vram_start = pci_resource_start(pdev, 2);
		dev->vram_size = pci_resource_len(pdev, 2);

		drm_info(&dev->drm,
			 "Register MMIO at 0x%pa size is %llu kiB\n",
			 &rmmio_start, (uint64_t)rmmio_size / 1024);
		dev->rmmio = devm_ioremap(dev->drm.dev,
					  rmmio_start,
					  rmmio_size);
		if (!dev->rmmio) {
			drm_err(&dev->drm,
				"Failed mapping registers mmio memory.\n");
			pci_release_regions(pdev);
			return -ENOMEM;
		}
	} else if (pci_id == VMWGFX_PCI_ID_SVGA2) {
		dev->io_start = pci_resource_start(pdev, 0);
		dev->vram_start = pci_resource_start(pdev, 1);
		dev->vram_size = pci_resource_len(pdev, 1);
		fifo_start = pci_resource_start(pdev, 2);
		fifo_size = pci_resource_len(pdev, 2);

		drm_info(&dev->drm,
			 "FIFO at %pa size is %llu kiB\n",
			 &fifo_start, (uint64_t)fifo_size / 1024);
		dev->fifo_mem = devm_memremap(dev->drm.dev,
					      fifo_start,
					      fifo_size,
					      MEMREMAP_WB);

		if (IS_ERR(dev->fifo_mem)) {
			drm_err(&dev->drm,
				"Failed mapping FIFO memory.\n");
			pci_release_regions(pdev);
			return PTR_ERR(dev->fifo_mem);
		}
	} else {
		pci_release_regions(pdev);
		return -EINVAL;
	}

	/*
	 * This is approximate size of the vram, the exact size will only
	 * be known after we read SVGA_REG_VRAM_SIZE. The PCI resource
	 * size will be equal to or bigger than the size reported by
	 * SVGA_REG_VRAM_SIZE.
	 */
	drm_info(&dev->drm,
		 "VRAM at %pa size is %llu kiB\n",
		 &dev->vram_start, (uint64_t)dev->vram_size / 1024);

	return 0;
}
static int vmw_detect_version(struct vmw_private *dev)
{
	uint32_t svga_id;

	vmw_write(dev, SVGA_REG_ID, vmw_is_svga_v3(dev) ?
		  SVGA_ID_3 : SVGA_ID_2);
	svga_id = vmw_read(dev, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2 && svga_id != SVGA_ID_3) {
		drm_err(&dev->drm,
			"Unsupported SVGA ID 0x%x on chipset 0x%x\n",
			svga_id, dev->pci_id);
		return -ENOSYS;
	}
	BUG_ON(vmw_is_svga_v3(dev) && (svga_id != SVGA_ID_3));
	drm_info(&dev->drm,
		 "Running on SVGA version %d.\n", (svga_id & 0xff));
	return 0;
}
static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
{
	int ret;
	enum vmw_res_type i;
	bool refuse_dma = false;
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);

	dev_priv->drm.dev_private = dev_priv;

	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->binding_mutex);
	spin_lock_init(&dev_priv->resource_lock);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cursor_lock);

	ret = vmw_setup_pci_resources(dev_priv, pci_id);
	if (ret)
		return ret;
	ret = vmw_detect_version(dev_priv);
	if (ret)
		goto out_no_pci_or_version;

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init_base(&dev_priv->res_idr[i], 1);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
	}

	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		drm_info(&dev_priv->drm,
			 "Restricting capabilities since DMA not available.\n");
		refuse_dma = true;
		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
			drm_info(&dev_priv->drm,
				 "Disabling 3D acceleration.\n");
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size;

		if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2)
			mem_size = vmw_read(dev_priv,
					    SVGA_REG_GBOBJECT_MEM_SIZE_KB);
		else
			mem_size = vmw_read(dev_priv,
					    SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 3;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->max_primary_mem =
			vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->max_primary_mem = dev_priv->vram_size;
	}

	drm_info(&dev_priv->drm,
		 "Legacy memory limits: VRAM = %llu kB, FIFO = %llu kB, surface = %u kB\n",
		 (u64)dev_priv->vram_size / 1024,
		 (u64)dev_priv->fifo_mem_size / 1024,
		 dev_priv->memory_size / 1024);
	drm_info(&dev_priv->drm,
		 "MOB limits: max mob size = %u kB, max mob pages = %u\n",
		 dev_priv->max_mob_size / 1024, dev_priv->max_mob_pages);

	vmw_print_bitmap(&dev_priv->drm, "Capabilities",
			 dev_priv->capabilities,
			 cap1_names, ARRAY_SIZE(cap1_names));
	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
		vmw_print_bitmap(&dev_priv->drm, "Capabilities2",
				 dev_priv->capabilities2,
				 cap2_names, ARRAY_SIZE(cap2_names));

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		drm_info(&dev_priv->drm,
			 "Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		drm_info(&dev_priv->drm,
			 "Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
	}
	drm_info(&dev_priv->drm,
		 "Maximum display memory size is %llu kiB\n",
		 (uint64_t)dev_priv->max_primary_mem / 1024);

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err0;
	}

	dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
						&vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		drm_err(&dev_priv->drm,
			"Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err0;
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = vmw_irq_install(&dev_priv->drm, pdev->irq);
		if (ret != 0) {
			drm_err(&dev_priv->drm,
				"Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	drm_vma_offset_manager_init(&dev_priv->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);
	ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
			      dev_priv->drm.dev,
			      dev_priv->drm.anon_inode->i_mapping,
			      &dev_priv->vma_manager,
			      dev_priv->map_mode == vmw_dma_alloc_coherent,
			      false);
	if (unlikely(ret != 0)) {
		drm_err(&dev_priv->drm,
			"Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */

	ret = vmw_vram_manager_init(dev_priv);
	if (unlikely(ret != 0)) {
		drm_err(&dev_priv->drm,
			"Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}

	ret = vmw_devcaps_create(dev_priv);
	if (unlikely(ret != 0)) {
		drm_err(&dev_priv->drm,
			"Failed initializing device caps.\n");
		goto out_no_vram;
	}

	/*
	 * "Guest Memory Regions" is an aperture-like feature with
	 *  one slot per bo. There is an upper limit on the number of
	 *  slots as well as the bo size.
	 */
	dev_priv->has_gmr = true;
	/* TODO: This is most likely not correct */
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma ||
	    vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) {
		drm_info(&dev_priv->drm,
			 "No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
		dev_priv->has_mob = true;

		if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) {
			drm_info(&dev_priv->drm,
				 "No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
		if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_DXCONTEXT))
			dev_priv->sm_type = VMW_SM_4;
	}

	vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);

	/* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
	if (has_sm4_context(dev_priv) &&
	    (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
		if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM41))
			dev_priv->sm_type = VMW_SM_4_1;
		if (has_sm4_1_context(dev_priv) &&
		    (dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
			if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5))
				dev_priv->sm_type = VMW_SM_5;
		}
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	vmw_print_sm_type(dev_priv);
	vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)",
			VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
			VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
	vmw_devcaps_destroy(dev_priv);
	vmw_vram_manager_fini(dev_priv);
out_no_vram:
	ttm_device_fini(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(&dev_priv->drm);
out_no_irq:
	ttm_object_device_release(&dev_priv->tdev);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
out_no_pci_or_version:
	pci_release_regions(pdev);
	return ret;
}
static void vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
	vmw_devcaps_destroy(dev_priv);
	vmw_vram_manager_fini(dev_priv);
	ttm_device_fini(&dev_priv->bdev);
	drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(&dev_priv->drm);

	ttm_object_device_release(&dev_priv->tdev);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	vmw_mksstat_remove_all(dev_priv);

	pci_release_regions(pdev);
}
static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(!vmw_fp))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	unsigned int flags;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			return ioctl_func(filp, cmd, arg);
		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
			if (!drm_is_current_master(file_priv) &&
			    !capable(CAP_SYS_ADMIN))
				return -EINVAL;
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	return ioctl_func(filp, cmd, arg);

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}
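
/*
 * Note (an assumption based on the check above): VMW_EXECBUF is forwarded
 * without the ioctl->cmd encoding comparison because its argument size has
 * grown across driver versions; the execbuf handler validates the size it
 * is actually given.
 */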
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}
#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif
static void vmw_master_set(struct drm_device *dev,
			   struct drm_file *file_priv,
			   bool from_open)
{
	/*
	 * Inform a new master that the layout may have changed while
	 * it was gone.
	 */
	drm_sysfs_hotplug_event(dev);
}
static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);
}
/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	if (!ttm_resource_manager_used(man)) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
		ttm_resource_manager_set_used(man, true);
	}
}
/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	__vmw_svga_enable(dev_priv);
}
/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	if (ttm_resource_manager_used(man)) {
		ttm_resource_manager_set_used(man, false);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
}
/**
 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
	/*
	 * Disabling SVGA will turn off device modesetting capabilities, so
	 * notify KMS about that so that it doesn't cache atomic state that
	 * isn't valid anymore, for example crtcs turned on.
	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
	 * end up with lock order reversal. Thus, a master may actually perform
	 * a new modeset just after we call vmw_kms_lost_device() and race with
	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
	 * to be inconsistent with the device, causing modesetting problems.
	 *
	 */
	vmw_kms_lost_device(&dev_priv->drm);
	if (ttm_resource_manager_used(man)) {
		if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		ttm_resource_manager_set_used(man, false);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
}
static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	ttm_mem_global_release(&ttm_mem_glob);
	drm_dev_unregister(dev);
	vmw_driver_unload(dev);
}
static unsigned long
vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
		      unsigned long len, unsigned long pgoff,
		      unsigned long flags)
{
	struct drm_file *file_priv = file->private_data;
	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);

	return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
				     &dev_priv->vma_manager);
}
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		/*
		 * Take the reservation sem in write mode, which will make sure
		 * there are no other processes holding a buffer object
		 * reservation, meaning we should be able to evict all buffer
		 * objects if needed.
		 * Once user-space processes have been frozen, we can release
		 * the lock again.
		 */
		dev_priv->suspend_locked = true;
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		if (READ_ONCE(dev_priv->suspend_locked)) {
			dev_priv->suspend_locked = false;
		}
		break;
	default:
		break;
	}
	return 0;
}
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}
static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}
static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}
static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * No user-space processes should be running now.
	 */
	ret = vmw_kms_suspend(&dev_priv->drm);
	if (ret) {
		DRM_ERROR("Failed to freeze modesetting.\n");
		return ret;
	}
	if (dev_priv->enable_fb)
		vmw_fb_off(dev_priv);

	vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_resource_evict_all(dev_priv);
	vmw_release_device_early(dev_priv);
	while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0);
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspend_locked = false;
		if (dev_priv->suspend_state)
			vmw_kms_resume(dev);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		return -EBUSY;
	}

	vmw_fence_fifo_down(dev_priv->fman);
	__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);
	return 0;
}
static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_detect_version(dev_priv);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->suspend_locked = false;
	if (dev_priv->suspend_state)
		vmw_kms_resume(&dev_priv->drm);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);

	return 0;
}
static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};
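
/*
 * suspend/resume above wrap the PCI power transitions for ordinary
 * suspend-to-RAM, while freeze/thaw/restore are the hibernation entry
 * points that tear down and rebuild device state via vmw_pm_freeze() and
 * vmw_pm_restore().
 */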
static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
	.get_unmapped_area = vmw_get_unmapped_area,
};
static const struct drm_driver driver = {
	.driver_features =
	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};
static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct vmw_private *vmw;
	int ret;

	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
	if (ret)
		return ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
				 struct vmw_private, drm);
	if (IS_ERR(vmw))
		return PTR_ERR(vmw);

	pci_set_drvdata(pdev, &vmw->drm);

	ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev);
	if (ret)
		return ret;

	ret = vmw_driver_load(vmw, ent->device);
	if (ret)
		return ret;

	ret = drm_dev_register(&vmw->drm, 0);
	if (ret) {
		vmw_driver_unload(&vmw->drm);
		return ret;
	}

	return 0;
}
static int __init vmwgfx_init(void)
{
	int ret;

	if (vgacon_text_force())
		return -EINVAL;

	ret = pci_register_driver(&vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}
static void __exit vmwgfx_exit(void)
{
	pci_unregister_driver(&vmw_pci_driver);
}
module_init(vmwgfx_init);
module_exit(vmwgfx_exit);
MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");