// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/console.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/mem_encrypt.h>

#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include "ttm_object.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
		 struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \
		 union drm_vmw_gb_surface_create_ext_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \
		 union drm_vmw_gb_surface_reference_ext_arg)
#define DRM_IOCTL_VMW_MSG \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \
		 struct drm_vmw_msg_arg)

/*
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}

/*
 * Ioctl definitions.
 */

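/*
 * Note on the flags used below: DRM_RENDER_ALLOW entries are reachable
 * from render nodes as well as the primary node, while DRM_MASTER (and
 * DRM_MASTER | DRM_AUTH) entries are restricted to the current DRM master.
 */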
static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark them as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	/*
	 * The permissions of the below ioctl are overridden in
	 * vmw_generic_ioctl(). We require either
	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
	 */
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_bo_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
		      vmw_gb_surface_define_ext_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
		      vmw_gb_surface_reference_ext_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_MSG,
		      vmw_msg_ioctl,
		      DRM_RENDER_ALLOW),
};

static const struct pci_device_id vmw_pci_id_list[] = {
	{ PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) },
	{ }
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);


static void vmw_print_capabilities2(uint32_t capabilities2)
{
	DRM_INFO("Capabilities2:\n");
	if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
		DRM_INFO(" Grow oTable.\n");
	if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
		DRM_INFO(" IntraSurface copy.\n");
	if (capabilities2 & SVGA_CAP2_DX3)
		DRM_INFO(" DX3.\n");
}

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO(" Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO(" Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO(" Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO(" Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO(" 8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO(" Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO(" 3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO(" Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO(" Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO(" Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO(" Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO(" Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO(" GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO(" Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO(" GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO(" Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO(" Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO(" Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO(" Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO(" DX Features.\n");
	if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
		DRM_INFO(" HP Command Queue.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_buffer_object *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
			  &vmw_sys_placement, false, true,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_bo_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory Objects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->sm_type = VMW_SM_LEGACY;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_bo_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/*
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

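/*
 * Module-parameter precedence in vmw_dma_select_mode() below:
 * force_coherent wins over restrict_iommu, and the default otherwise is
 * vmw_dma_map_populate (DMA mappings are kept cached).
 */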
/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine what actions need to be taken by the
 * driver to make system pages visible to the device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Caching DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};

	/* TTM currently doesn't fully support SEV encryption. */
	if (mem_encrypt_active())
		return -EINVAL;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
	else if (vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;
	else
		dev_priv->map_mode = vmw_dma_map_populate;

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
	if (dev_priv->map_mode != vmw_dma_phys &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
	}

	return ret;
}

static int vmw_vram_manager_init(struct vmw_private *dev_priv)
{
	int ret;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ret = vmw_thp_init(dev_priv);
#else
	ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
				 dev_priv->vram_size >> PAGE_SHIFT);
#endif
	ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
	return ret;
}

static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	vmw_thp_fini(dev_priv);
#else
	ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
#endif
}

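/*
 * PCI layout assumed by vmw_setup_pci_resources() below: resource 0 is
 * the register I/O range, resource 1 the VRAM aperture and resource 2
 * the FIFO command memory, which is memremapped write-back.
 */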
static int vmw_setup_pci_resources(struct vmw_private *dev,
				   unsigned long pci_id)
{
	resource_size_t fifo_start;
	resource_size_t fifo_size;
	int ret;
	struct pci_dev *pdev = to_pci_dev(dev->drm.dev);

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, "vmwgfx probe");
	if (ret)
		return ret;

	dev->io_start = pci_resource_start(pdev, 0);
	dev->vram_start = pci_resource_start(pdev, 1);
	dev->vram_size = pci_resource_len(pdev, 1);
	fifo_start = pci_resource_start(pdev, 2);
	fifo_size = pci_resource_len(pdev, 2);

	DRM_INFO("FIFO at %pa size is %llu kiB\n",
		 &fifo_start, (uint64_t)fifo_size / 1024);
	dev->fifo_mem = devm_memremap(dev->drm.dev,
				      fifo_start,
				      fifo_size,
				      MEMREMAP_WB);

	if (IS_ERR(dev->fifo_mem)) {
		DRM_ERROR("Failed mapping FIFO memory.\n");
		pci_release_regions(pdev);
		return PTR_ERR(dev->fifo_mem);
	}

	/*
	 * This is the approximate size of the VRAM; the exact size will only
	 * be known after we read SVGA_REG_VRAM_SIZE. The PCI resource
	 * size will be equal to or bigger than the size reported by
	 * SVGA_REG_VRAM_SIZE.
	 */
	DRM_INFO("VRAM at %pa size is %llu kiB\n",
		 &dev->vram_start, (uint64_t)dev->vram_size / 1024);

	return 0;
}

static int vmw_detect_version(struct vmw_private *dev)
{
	uint32_t svga_id;

	vmw_write(dev, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		DRM_ERROR("Unsupported SVGA ID 0x%x on chipset 0x%x\n",
			  svga_id, dev->vmw_chipset);
		return -ENOSYS;
	}
	return 0;
}

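/*
 * Main device bring-up: read device capabilities and limits, pick a DMA
 * mode, initialize TTM and the memory managers, then start KMS, overlays
 * and the FIFO before registering the PM notifier.
 */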
static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
{
	int ret;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);

	dev_priv->vmw_chipset = pci_id;
	dev_priv->last_read_seqno = (uint32_t) -100;
	dev_priv->drm.dev_private = dev_priv;

	ret = vmw_setup_pci_resources(dev_priv, pci_id);
	if (ret)
		return ret;
	ret = vmw_detect_version(dev_priv);
	if (ret)
		goto out_no_pci_or_version;

	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	mutex_init(&dev_priv->global_kms_state_mutex);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->resource_lock);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->cursor_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;


	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
	}


	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities since DMA not available.\n");
		refuse_dma = true;
		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
			DRM_INFO("Disabling 3D acceleration.\n");
	}

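	/* Read the device limits used to size VRAM, GMR and MOB memory below. */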
	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size;

		if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2)
			mem_size = vmw_read(dev_priv,
					    SVGA_REG_GBOBJECT_MEM_SIZE_KB);
		else
			mem_size =
				vmw_read(dev_priv,
					 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 3;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);
	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
		vmw_print_capabilities2(dev_priv->capabilities2);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %llu kiB\n",
		 (uint64_t)dev_priv->prim_bb_mem / 1024);

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err0;
	}

	dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
						&vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err0;
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = vmw_irq_install(&dev_priv->drm, pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	drm_vma_offset_manager_init(&dev_priv->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);
	ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
			      dev_priv->drm.dev,
			      dev_priv->drm.anon_inode->i_mapping,
			      &dev_priv->vma_manager,
			      dev_priv->map_mode == vmw_dma_alloc_coherent,
			      false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

TH
893 /*
894 * Enable VRAM, but initially don't use it until SVGA is enabled and
895 * unhidden.
896 */
252f8d7b
DA
897
898 ret = vmw_vram_manager_init(dev_priv);
3458390b
TH
899 if (unlikely(ret != 0)) {
900 DRM_ERROR("Failed initializing memory manager for VRAM.\n");
901 goto out_no_vram;
902 }
903
3629ca5d
CK
904 /*
905 * "Guest Memory Regions" is an aperture like feature with
906 * one slot per bo. There is an upper limit of the number of
907 * slots as well as the bo size.
908 */
3458390b 909 dev_priv->has_gmr = true;
3629ca5d 910 /* TODO: This is most likely not correct */
3458390b 911 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
62161778
DA
912 refuse_dma ||
913 vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) {
3458390b
TH
914 DRM_INFO("No GMR memory available. "
915 "Graphics memory resources are very limited.\n");
916 dev_priv->has_gmr = false;
917 }
918
81a00960 919 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
3458390b 920 dev_priv->has_mob = true;
62161778
DA
921
922 if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) {
3458390b
TH
923 DRM_INFO("No MOB memory available. "
924 "3D will be disabled.\n");
925 dev_priv->has_mob = false;
926 }
927 }
928
ef7c7b74 929 if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
d80efd5c 930 spin_lock(&dev_priv->cap_lock);
dc75e733 931 vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
878c6ecd
DR
932 if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
933 dev_priv->sm_type = VMW_SM_4;
d80efd5c
TH
934 spin_unlock(&dev_priv->cap_lock);
935 }
936
fd567467 937 vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
878c6ecd
DR
938
939 /* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
940 if (has_sm4_context(dev_priv) &&
941 (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
942 vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM41);
943
944 if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
945 dev_priv->sm_type = VMW_SM_4_1;
4dec2805
DR
946
947 if (has_sm4_1_context(dev_priv) &&
948 (dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
949 vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM5);
950 if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
951 dev_priv->sm_type = VMW_SM_5;
952 }
878c6ecd
DR
953 }
954
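	/* Memory managers and SM level are known; bring up KMS, overlays and the device. */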
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	if (dev_priv->sm_type == VMW_SM_5)
		DRM_INFO("SM5 support available.\n");
	if (dev_priv->sm_type == VMW_SM_4_1)
		DRM_INFO("SM4_1 support available.\n");
	if (dev_priv->sm_type == VMW_SM_4)
		DRM_INFO("SM4 support available.\n");

	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		 VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
	vmw_vram_manager_fini(dev_priv);
out_no_vram:
	ttm_device_fini(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(&dev_priv->drm);
out_no_irq:
	ttm_object_device_release(&dev_priv->tdev);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
out_no_pci_or_version:
	pci_release_regions(pdev);
	return ret;
}

static void vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
	vmw_vram_manager_fini(dev_priv);
	ttm_device_fini(&dev_priv->bdev);
	drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(&dev_priv->drm);

	ttm_object_device_release(&dev_priv->tdev);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	pci_release_regions(pdev);
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(!vmw_fp))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

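/*
 * Common ioctl entry point for the unlocked and compat paths below.
 * Driver-private ioctls are validated against vmw_ioctls[]; VMW_EXECBUF
 * skips the command-encoding check, and VMW_UPDATE_LAYOUT additionally
 * requires DRM master or CAP_SYS_ADMIN.
 */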
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	unsigned int flags;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			return ioctl_func(filp, cmd, arg);
		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
			if (!drm_is_current_master(file_priv) &&
			    !capable(CAP_SYS_ADMIN))
				return -EACCES;
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	return ioctl_func(filp, cmd, arg);

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_master_set(struct drm_device *dev,
			   struct drm_file *file_priv,
			   bool from_open)
{
	/*
	 * Inform a new master that the layout may have changed while
	 * it was gone.
	 */
	if (!from_open)
		drm_sysfs_hotplug_event(dev);
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	if (!ttm_resource_manager_used(man)) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		ttm_resource_manager_set_used(man, true);
	}
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	if (ttm_resource_manager_used(man)) {
		ttm_resource_manager_set_used(man, false);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
}

/**
 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
	/*
	 * Disabling SVGA will turn off device modesetting capabilities, so
	 * notify KMS about that so that it doesn't cache atomic state that
	 * isn't valid anymore, for example crtcs turned on.
	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
	 * end up with lock order reversal. Thus, a master may actually perform
	 * a new modeset just after we call vmw_kms_lost_device() and race with
	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
	 * to be inconsistent with the device, causing modesetting problems.
	 *
	 */
	vmw_kms_lost_device(&dev_priv->drm);
	ttm_write_lock(&dev_priv->reservation_sem, false);
	if (ttm_resource_manager_used(man)) {
		if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		ttm_resource_manager_set_used(man, false);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	ttm_write_unlock(&dev_priv->reservation_sem);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	ttm_mem_global_release(&ttm_mem_glob);
	drm_dev_unregister(dev);
	vmw_driver_unload(dev);
}

static unsigned long
vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
		      unsigned long len, unsigned long pgoff,
		      unsigned long flags)
{
	struct drm_file *file_priv = file->private_data;
	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);

	return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
				     &dev_priv->vma_manager);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		/*
		 * Take the reservation sem in write mode, which will make sure
		 * there are no other processes holding a buffer object
		 * reservation, meaning we should be able to evict all buffer
		 * objects if needed.
		 * Once user-space processes have been frozen, we can release
		 * the lock again.
		 */
		ttm_suspend_lock(&dev_priv->reservation_sem);
		dev_priv->suspend_locked = true;
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		if (READ_ONCE(dev_priv->suspend_locked)) {
			dev_priv->suspend_locked = false;
			ttm_suspend_unlock(&dev_priv->reservation_sem);
		}
		break;
	default:
		break;
	}
	return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

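/*
 * Hibernation freeze path: suspend KMS and fbdev, evict pinned and
 * resource-backed buffers, swap out TTM pages, and fail with -EBUSY if
 * any FIFO/3D resources are still active.
 */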
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * Unlock for vmw_kms_suspend.
	 * No user-space processes should be running now.
	 */
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	ret = vmw_kms_suspend(&dev_priv->drm);
	if (ret) {
		ttm_suspend_lock(&dev_priv->reservation_sem);
		DRM_ERROR("Failed to freeze modesetting.\n");
		return ret;
	}
	if (dev_priv->enable_fb)
		vmw_fb_off(dev_priv);

	ttm_suspend_lock(&dev_priv->reservation_sem);
	vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_resource_evict_all(dev_priv);
	vmw_release_device_early(dev_priv);
	while (ttm_global_swapout(&ctx, GFP_KERNEL) > 0);
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspend_locked = false;
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->suspend_state)
			vmw_kms_resume(dev);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		return -EBUSY;
	}

	vmw_fence_fifo_down(dev_priv->fman);
	__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);
	return 0;
}

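/*
 * Thaw/restore path: re-identify the device by rewriting SVGA_REG_ID,
 * re-request the device resources and re-enable SVGA, fbdev and the KMS
 * state saved at freeze time.
 */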
static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->suspend_locked = false;
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	if (dev_priv->suspend_state)
		vmw_kms_resume(&dev_priv->drm);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

1437
e08e96de
AV
1438static const struct file_operations vmwgfx_driver_fops = {
1439 .owner = THIS_MODULE,
1440 .open = drm_open,
1441 .release = drm_release,
1442 .unlocked_ioctl = vmw_unlocked_ioctl,
1443 .mmap = vmw_mmap,
1444 .poll = vmw_fops_poll,
1445 .read = vmw_fops_read,
e08e96de 1446#if defined(CONFIG_COMPAT)
64190bde 1447 .compat_ioctl = vmw_compat_ioctl,
e08e96de
AV
1448#endif
1449 .llseek = noop_llseek,
9431042d 1450 .get_unmapped_area = vmw_get_unmapped_area,
e08e96de
AV
1451};
1452
70a59dd8 1453static const struct drm_driver driver = {
1ff49481 1454 .driver_features =
0424fdaf 1455 DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
fb1d9738 1456 .ioctls = vmw_ioctls,
f95aeb17 1457 .num_ioctls = ARRAY_SIZE(vmw_ioctls),
fb1d9738
JB
1458 .master_set = vmw_master_set,
1459 .master_drop = vmw_master_drop,
1460 .open = vmw_driver_open,
1461 .postclose = vmw_postclose,
5e1782d2
DA
1462
1463 .dumb_create = vmw_dumb_create,
1464 .dumb_map_offset = vmw_dumb_map_offset,
1465 .dumb_destroy = vmw_dumb_destroy,
1466
69977ff5
TH
1467 .prime_fd_to_handle = vmw_prime_fd_to_handle,
1468 .prime_handle_to_fd = vmw_prime_handle_to_fd,
1469
e08e96de 1470 .fops = &vmwgfx_driver_fops,
fb1d9738
JB
1471 .name = VMWGFX_DRIVER_NAME,
1472 .desc = VMWGFX_DRIVER_DESC,
1473 .date = VMWGFX_DRIVER_DATE,
1474 .major = VMWGFX_DRIVER_MAJOR,
1475 .minor = VMWGFX_DRIVER_MINOR,
1476 .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
1477};
1478
8410ea3b
DA
1479static struct pci_driver vmw_pci_driver = {
1480 .name = VMWGFX_DRIVER_NAME,
1481 .id_table = vmw_pci_id_list,
1482 .probe = vmw_probe,
1483 .remove = vmw_remove,
1484 .driver = {
1485 .pm = &vmw_pm_ops
1486 }
1487};
1488
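/*
 * PCI probe: kick out conflicting framebuffer drivers, enable the device,
 * allocate the DRM device embedded in vmw_private, initialize the global
 * TTM memory accounting and finally load and register the driver.
 */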
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct vmw_private *vmw;
	int ret;

	ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "svgadrmfb");
	if (ret)
		return ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
				 struct vmw_private, drm);
	if (IS_ERR(vmw))
		return PTR_ERR(vmw);

	pci_set_drvdata(pdev, &vmw->drm);

	ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev);
	if (ret)
		return ret;

	ret = vmw_driver_load(vmw, ent->device);
	if (ret)
		return ret;

	ret = drm_dev_register(&vmw->drm, 0);
	if (ret) {
		vmw_driver_unload(&vmw->drm);
		return ret;
	}

	return 0;
}

static int __init vmwgfx_init(void)
{
	int ret;

	if (vgacon_text_force())
		return -EINVAL;

	ret = pci_register_driver(&vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	pci_unregister_driver(&vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");