/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */
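/*
 * Each definition below packs the transfer direction (R/W/RW), the
 * argument struct size and the command number into a single ioctl
 * number via the standard DRM_IOR/DRM_IOW/DRM_IOWR helpers.
 */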

#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		struct drm_vmw_update_layout_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
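
/*
 * Illustrative expansion: VMW_IOCTL_DEF(VMW_GET_PARAM, ...) becomes a
 * designated initializer at index
 * [DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] of
 * vmw_ioctls[], with the fully encoded ioctl number stored in the last
 * field so it can be verified at dispatch time; see vmw_unlocked_ioctl().
 */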

/**
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT,
		      vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),

	/* These allow direct access to the framebuffers; mark them master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER | DRM_UNLOCKED),
};

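/* 0x15ad is VMware's PCI vendor ID; device 0x0405 is the SVGA II adapter. */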
static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
}

/**
 * vmw_dummy_query_bo_prepare - Initialize a query result structure at
 * the start of a buffer object.
 *
 * @dev_priv: The device private structure.
 *
 * This function idles the buffer using an uninterruptible wait, then
 * maps the first page and initializes a pending occlusion query result
 * structure. Finally it unmaps the buffer.
 *
 * TODO: Since we're only mapping a single page, we should optimize the map
 * to use kmap_atomic / iomap_atomic.
 */
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
{
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;
	int ret;
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;

	ttm_bo_reserve(bo, false, false, false, 0);
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bdev->fence_lock);
	if (unlikely(ret != 0))
		(void) vmw_fallback_wait(dev_priv, false, true, 0, false,
					 10*HZ);

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	} else {
		DRM_ERROR("Dummy query buffer map failed.\n");
	}
	ttm_bo_unreserve(bo);
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	return ttm_bo_create(&dev_priv->bdev,
			     PAGE_SIZE,
			     ttm_bo_type_device,
			     &vmw_vram_sys_placement,
			     0, false, NULL,
			     &dev_priv->dummy_query_bo);
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;
	vmw_dummy_query_bo_prepare(dev_priv);

	return 0;

out_no_query_bo:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	ttm_bo_unref(&dev_priv->dummy_query_bo);
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to SVGA
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to SVGA mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
			bool unhide_svga)
{
	int ret = 0;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			--dev_priv->num_3d_resources;
	} else if (unhide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
			  ~SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	mutex_unlock(&dev_priv->release_mutex);
	return ret;
}

/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to VGA mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to VGA mode when it releases its refcount, to account
 * for the case of an X server VT switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
			 bool hide_svga)
{
	int32_t n3d;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(--dev_priv->num_3d_resources == 0))
		vmw_release_device(dev_priv);
	else if (hide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
			  SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	n3d = (int32_t) dev_priv->num_3d_resources;
	mutex_unlock(&dev_priv->release_mutex);

	BUG_ON(n3d < 0);
}

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It reads the SVGA_REG_[WIDTH|HEIGHT] registers, raises each value to
 * at least VMW_MIN_INITIAL_[WIDTH|HEIGHT], and, if the result exceeds
 * the fb_max_[width|height] fields (which shouldn't happen), falls back
 * to VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	rwlock_init(&dev_priv->resource_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	atomic_set(&dev_priv->fifo_queue_waiters, 0);

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	mutex_lock(&dev_priv->hw_mutex);

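	/*
	 * SVGA version negotiation: write the highest ID the driver
	 * supports and read back the ID the device actually accepted.
	 */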
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		dev_priv->max_gmr_descriptors =
			vmw_read(dev_priv,
				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
	}
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512 MiB on surface memory;
		 * all HWV8 hardware supports GMR2 anyway.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}

	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max GMR descriptors is %u\n",
			 (unsigned)dev_priv->max_gmr_descriptors);
	}
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_err2;
	}

	dev_priv->has_gmr = true;
	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
			   dev_priv->max_gmr_ids) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

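	/*
	 * Map the register/FIFO MMIO area write-combined. Note that
	 * arch_phys_wc_add() only adds a WC MTRR on machines without PAT;
	 * with PAT, the WC mapping comes from ioremap_wc() itself.
	 */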
	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
					       dev_priv->mmio_size);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/*
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears that vesafb is loaded; "
			 "ignore the above error if so.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	vmw_kms_save_vga(dev_priv);

	/* Start kms and overlay systems, needs fifo. */
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	if (dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, true);
		if (unlikely(ret != 0))
			goto out_no_fifo;
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

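	/*
	 * Error unwind: each label below undoes the setup steps that
	 * succeeded before the failure, in reverse order of
	 * initialization.
	 */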
out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	vmw_kms_restore_vga(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
out_err3:
	arch_phys_wc_del(dev_priv->mmio_mtrr);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void) ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
	(void) ttm_bo_device_release(&dev_priv->bdev);
out_err1:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);
	return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	if (dev_priv->ctx.cmd_bounce)
		vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_close(dev_priv);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, false);
	}
	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	arch_phys_wc_del(dev_priv->mmio_mtrr);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void) ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}

static void vmw_preclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);
	ttm_object_file_release(&vmw_fp->tfile);
	if (vmw_fp->locked_master)
		drm_master_put(&vmw_fp->locked_master);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	INIT_LIST_HEAD(&vmw_fp->fence_events);
	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;
	dev_priv->bdev.dev_mapping = dev->dev_mapping;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

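		/*
		 * cmd_drv holds the fully encoded ioctl number (direction,
		 * argument size and command), so this also rejects calls
		 * whose argument size or direction doesn't match the
		 * table entry.
		 */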
		if (unlikely(ioctl->cmd_drv != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
	}

	return drm_ioctl(filp, cmd, arg);
}

static void vmw_lastclose(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	struct drm_mode_set set;
	int ret;

	set.x = 0;
	set.y = 0;
	set.fb = NULL;
	set.mode = NULL;
	set.connectors = NULL;
	set.num_connectors = 0;

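	/*
	 * Applying a mode set with a NULL fb and mode turns each CRTC
	 * off, dropping the state left behind by the last master.
	 */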
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		set.crtc = crtc;
		ret = drm_mode_set_config_internal(&set);
		WARN_ON(ret != 0);
	}
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
	INIT_LIST_HEAD(&vmaster->fb_surf);
	mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (!dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, true);
		if (unlikely(ret != 0))
			return ret;
		vmw_kms_save_vga(dev_priv);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			goto out_no_active_lock;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0))
			DRM_ERROR("Unable to clean VRAM on master drop.\n");

		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;

out_no_active_lock:
	if (!dev_priv->enable_fb) {
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
	}
	return ret;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_execbuf_release_pinned_bo(dev_priv);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

	if (!dev_priv->enable_fb) {
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0))
			DRM_ERROR("Unable to clean VRAM on master drop.\n");
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);
	struct vmw_master *vmaster = dev_priv->active_master;

	switch (val) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ttm_suspend_lock(&vmaster->lock);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);

		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
	case PM_POST_RESTORE:
		ttm_suspend_unlock(&vmaster->lock);

		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

/**
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->num_3d_resources != 0) {
		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");
		return -EBUSY;
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_prepare(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	/*
	 * Release 3d reference held by fbdev and potentially
	 * stop fifo.
	 */
	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_3d_resource_dec(dev_priv, true);

	if (dev_priv->num_3d_resources != 0) {

		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");

		if (dev_priv->enable_fb)
			vmw_3d_resource_inc(dev_priv, true);
		dev_priv->suspended = false;
		return -EBUSY;
	}

	return 0;
}

static void vmw_pm_complete(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);
	mutex_unlock(&dev_priv->hw_mutex);

	/*
	 * Reclaim 3d reference held by fbdev and potentially
	 * start fifo.
	 */
	if (dev_priv->enable_fb)
		vmw_3d_resource_inc(dev_priv, false);

	dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.prepare = vmw_pm_prepare,
	.complete = vmw_pm_complete,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = drm_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
			   DRIVER_MODESET,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.preclose = vmw_preclose,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");